code | repo_name | path | language | license | size
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013 - TODAY Denero Team. (<http://www.deneroteam.com>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp import addons
from openerp.tools.translate import _
class res_partner_title(orm.Model):
_inherit = "res.partner.title"
_order = "sequence"
_columns = {
'sequence': fields.integer('Sequence'),
}
_defaults = {
'sequence': 1,
}
class res_contact_function(orm.Model):
_name = "res.contact.function"
_description = "Contact Function"
_order = "name"
_columns = {
'name': fields.char('Name', size=32),
}
class res_partner_address_contact(orm.Model):
_name = "res.partner.address.contact"
_description = "Address Contact"
def name_get(self, cr, uid, ids, context=None):
res = []
for rec in self.browse(cr, uid, ids, context=context):
if rec.title:
res.append((rec.id, rec.title.name + ' ' + rec.last_name + ' ' + (rec.first_name or '')))
else:
res.append((rec.id, rec.last_name + ' ' + (rec.first_name or '')))
return res
def _name_get_full(self, cr, uid, ids, prop, unknown_none, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
if rec.title:
result[rec.id] = rec.title.name + ' ' + rec.last_name + ' ' + (rec.first_name or '')
else:
result[rec.id] = rec.last_name + ' ' + (rec.first_name or '')
return result
_columns = {
'complete_name': fields.function(_name_get_full, string='Name', size=64, type="char", store=False, select=True),
'name': fields.char('Name', size=64),
'last_name': fields.char('Last Name', size=64, required=True),
'first_name': fields.char('First Name', size=64),
'mobile': fields.char('Mobile', size=64),
'fisso': fields.char('Phone', size=64),
'title': fields.many2one('res.partner.title', 'Title', domain=[('domain', '=', 'contact')]),
'website': fields.char('Website', size=120),
'address_id': fields.many2one('res.partner.address', 'Address'),
'partner_id': fields.related(
'address_id', 'partner_id', type='many2one', relation='res.partner', string='Main Employer'),
'lang_id': fields.many2one('res.lang', 'Language'),
'country_id': fields.many2one('res.country', 'Nationality'),
'birthdate': fields.char('Birthdate', size=64),
'active': fields.boolean('Active', help="If the active field is set to False,\
it will allow you to hide the partner contact without removing it."),
'email': fields.char('E-Mail', size=240),
'comment': fields.text('Notes', translate=True),
'photo': fields.binary('Photo'),
'function': fields.char("Function", size=64),
'function_id': fields.many2one('res.contact.function', 'Function'),
}
def _get_photo(self, cr, uid, context=None):
photo_path = addons.get_module_resource('base_address_contacts', 'images', 'photo.png')
with open(photo_path, 'rb') as photo_file:
return photo_file.read().encode('base64')
_defaults = {
'name': '/',
'photo': _get_photo,
'active': True,
'address_id': lambda self, cr, uid, context: context.get('address_id', False),
}
_order = "last_name"
def name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=None):
if not args:
args = []
if context is None:
context = {}
if name:
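# '|' is OpenERP's prefix-notation OR operator: match the search term
# against either last_name or first_name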
ids = self.search(
cr, uid, ['|', ('last_name', operator, name), ('first_name', operator, name)] + args, limit=limit,
context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
name = ''
update = False
if vals.get('last_name', False):
name += vals['last_name']
update = True
if vals.get('first_name', False):
name += ' ' + vals['first_name']
update = True
if update:
vals['name'] = name.strip()
return super(res_partner_address_contact, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
name = ''
update = False
if vals.get('last_name', False):
name += vals['last_name']
update = True
if vals.get('first_name', False):
name += ' ' + vals['first_name']
update = True
if update:
vals['name'] = name.strip()
return super(res_partner_address_contact, self).write(cr, uid, ids, vals, context)
class res_partner_address(orm.Model):
_inherit = 'res.partner.address'
def get_full_name(self, cr, uid, ids, field_name, arg, context=None):
res = {}
if context is None:
context = {}
for rec in self.browse(cr, uid, ids, context=context):
addr = ''
if rec.partner_id:
if rec.partner_id.name != rec.name:
addr = rec.name or ''
if rec.name and (rec.city or rec.country_id):
addr += ', '
addr += (rec.city or '') + ', ' + (rec.street or '')
if rec.partner_id and context.get('contact_display', False) == 'partner_address':
addr = "%s: %s" % (rec.partner_id.name, addr.strip())
else:
addr = addr.strip()
res[rec.id] = addr or ''
return res
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
res = []
length = context.get('name_length', False) or 45
for record in self.browse(cr, uid, ids, context=context):
name = record.complete_name or record.name or ''
if len(name) > length:
name = name[:length] + '...'
res.append((record.id, name))
return res
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if context is None:
context = {}
ids = []
name_array = name.split()
search_domain = []
for n in name_array:
search_domain.append('|')
search_domain.append(('name', operator, n))
search_domain.append(('complete_name', operator, n))
ids = self.search(cr, user, search_domain + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
_columns = {
'partner_id': fields.many2one('res.partner', 'Partner Name', ondelete='set null', select=True, help="Partner this address belongs to.", required=True),
'contact_ids': fields.one2many('res.partner.address.contact', 'address_id', 'Functions and Contacts'),
'mobile': fields.char('Mobile', size=64),
'pec': fields.char('PEC', size=64),
'complete_name': fields.function(get_full_name, method=True, type='char', size=1024, readonly=True, store=False),
}
class res_partner(orm.Model):
_inherit = 'res.partner'
def _get_contacts(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for partner in self.browse(cr, uid, ids, context):
result[partner.id] = []
for address in partner.address:
result[partner.id] += [contact.id for contact in address.contact_ids]
return result
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if context.get('import', False):
return super(res_partner, self).create(cr, uid, vals, context)
if not vals.get('address', False) and context.get('default_type', '') != 'lead':
raise orm.except_orm(_('Error!'),
_('At least one address of type "Default" is needed!'))
is_default = False
if context.get('default_type', '') == 'lead':
return super(res_partner, self).create(cr, uid, vals, context)
for address in vals['address']:
if address[2].get('type') == 'default':
is_default = True
if not is_default:
raise orm.except_orm(_('Error!'),
_('At least one address of type "Default" is needed!'))
return super(res_partner, self).create(cr, uid, vals, context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if vals.get('address', False):
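# one2many values are (command, id, values) triples; command 2 unlinks
# the address record and deletes it from the database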
for address in vals['address']:
if address[0] == 2: # 2 means 'delete'
if self.pool['res.partner.address'].browse(cr, uid, address[1], context).type == 'default':
raise orm.except_orm(_('Error!'),
_('At least one address of type "Default" is needed!'))
return super(res_partner, self).write(cr, uid, ids, vals, context)
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
for partner in self.browse(cr, uid, ids, context):
if partner.address:
raise orm.except_orm(_('Error!'),
_('Before deleting the partner, you need to delete its addresses from the menu.'))
return super(res_partner, self).unlink(cr, uid, ids, context)
_columns = {
'contact_ids': fields.function(_get_contacts, string=_("Functions and Contacts"), type='one2many', method=True, obj='res.partner.address.contact')
}
| odoousers2014/LibrERP | base_address_contacts/res_partner_address.py | Python | agpl-3.0 | 10,827 |
#! /usr/bin/env python
# vim:ts=4:sw=4:expandtab
# -*- coding: utf-8 -*-
'''
Based on https://github.com/mortnerDHCPv4v6
'''
# TODO:
# * refactor
# * read conf from dhclient.conf
# * daemonize
# * requests in loop
# * send renew according to renew time
# * implement release
# * implement nak case
# FIXME:
# * build package with most common BOOTP/DHCP options
__author__ = "duy <duy at rhizoma dot tk>"
__copyright__ = "GPL v3"
from scapy.all import *
import random
import string
import ipaddr
from time import sleep
import subprocess
import argparse
# for debugging
#CLIENT_PORT = 8001
#SERVER_PORT = 8000
CLIENT_PORT = 68
SERVER_PORT = 67
BROADCAST_ADDR = '255.255.255.255'
META_ADDR = '0.0.0.0'
BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
MAX_DHCP_LEASE = 1500
LEASE_TIME = 43200 # 7776000
# "subnet_mask", "router", "name_server", "domain"
PARAM_REQ_LIST = '\x01\x03\x06\x0fw\xfc'# \x1c3
INIT_STATE = 0
BOUND_STATE = 1
RENEW_STATE = 2
REBIND_STATE = 3
REBOOT_STATE = 4
TIMEOUT_STATE = 5
RENEW_TIME_ON_LEASE = 1.0/2
REBIND_TIME_ON_LEASE = 7.0/8
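# simplified RFC 2131 client lifecycle: INIT -> discover/offer/request/ack ->
# BOUND, then RENEW at T1 (1/2 of the lease) and REBIND at T2 (7/8 of the lease)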
class Limits:
XID_MIN = 1
XID_MAX = 900000000
def randomHostname(length=8, charset=None):
charset = charset or string.ascii_uppercase + string.digits
return ''.join(random.choice(charset) for x in range(length))
def genXid():
return random.randint(Limits.XID_MIN, Limits.XID_MAX)
class DHCPv4Client(object):
def __init__(self, iface, server_port=None, client_port=None, server_ip=None,
server_mac=None, hostname=None):
self.iface = iface
self.state = INIT_STATE
self.renew_time = 0
self.rebind_time = 0
self.server_port = server_port or SERVER_PORT
self.client_port = client_port or CLIENT_PORT
self.server_ip = server_ip or BROADCAST_ADDR
self.server_mac = server_mac or BROADCAST_MAC
self.client_ip = META_ADDR
_, client_mac = get_if_raw_hwaddr(self.iface)
self.client_mac = client_mac
self.hostname = hostname or randomHostname()
self.client_xid = genXid()
# FIXME: when is the server xid used?
self.server_xid = None
self.server_id = None
self.response_server_ip = None
self.response_server_mac = None
self.client_ip_offered = None
self.subnet_mask = None
self.offered_ip = None
self.lease_time = None
self.router = None
self.name_server = None
self.domain = None
self.options = []
self.callbacks = {}
self.history = []
def __str__(self):
return self.__repr__()
def __repr__(self):
return """DHCPv4 Client
Interface: %s
Verbosity: %s
Client Configuration: | Server
-------------------------------------|------------------------------
IP = %-20s %-20s
HWAddr = %-20s %-20s
Hostname = %-20s
MASK = %-20s
xID = %-20s %-20s
DHCP Specific
--------------------
serverID = %-20s
Options = %-20s
Registered Callbacks
--------------------
%s
History
--------------------
%s
""" % (conf.iface, conf.verb,
self.client_ip,
self.server_ip,
self.client_mac,
self.server_mac,
self.hostname,
self.subnet_mask,
self.client_xid,
self.server_xid,
self.server_id,
repr(self.options),
self.callbacks,
self.history)
def register_callback(self, hook, func):
self.callbacks[hook] = func
def exec_callback(self, hook, args):
self.track_history("Hook:" + str(hook))
if hook in self.callbacks:
self.callbacks[hook]()
def track_history(self, name=None):
from inspect import stack
name = name or stack()[1][3]
self.history.append(name)
def genDiscover(self):
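# scapy's '/' operator stacks layers; Ether/IP/UDP/BOOTP/DHCP yields a raw
# broadcast DHCPDISCOVER frame that sendp() later puts straight on the wire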
dhcp_discover = (
Ether(src=str2mac(self.client_mac), dst=self.server_mac) /
IP(src=self.client_ip, dst=self.server_ip) /
UDP(sport=self.client_port, dport=self.server_port) /
BOOTP(chaddr=[self.client_mac], xid=self.client_xid) /
DHCP(options=[
("message-type", "discover"),
("param_req_list", PARAM_REQ_LIST),
("max_dhcp_size", MAX_DHCP_LEASE),
("client_id", self.client_mac),
("lease_time", LEASE_TIME),
("hostname", self.hostname),
"end"
])
)
return dhcp_discover
def genRequest(self):
dhcp_req = (
Ether(src=str2mac(self.client_mac), dst=self.server_mac) /
IP(src=self.client_ip, dst=self.server_ip) /
UDP(sport=self.client_port, dport=self.server_port) /
BOOTP(chaddr=[self.client_mac], xid=self.client_xid) /
DHCP(options=[
("message-type", "request"),
("param_req_list", PARAM_REQ_LIST),
("max_dhcp_size", MAX_DHCP_LEASE),
("client_id", self.client_mac),
("requested_addr", self.client_ip_offered), # obtained from discover
("server_id", self.server_id), # obtained from discover
("hostname", self.hostname),
"end"
])
)
return dhcp_req
def genRelease(self):
dhcp_release = (
Ether(src=str2mac(self.client_mac), dst=self.server_mac) /
IP(src=self.client_ip, dst=self.server_ip) /
UDP(sport=self.client_port, dport=self.server_port) /
BOOTP(chaddr=[self.client_mac], xid=self.client_xid) /
DHCP(options=[
("message-type", "release"),
("server_id", self.server_id), # obtained from discover
("client_id", self.client_mac),
"end"
])
)
return dhcp_release
def parseOffer(self, packet):
print 'Parsing offer'
packet.show()
self.response_server_ip = packet[IP].src
self.response_server_mac = packet[Ether].src
self.server_id = packet[BOOTP].siaddr
#FIXME: xid has to match the initial xid
# packet[BOOTP].xid
# FIXME: chaddr has to match client_mac
# str2mac(packet[BOOTP].chaddr)
# FIXME: check if yiaddr match current client ip or requested ip
self.client_ip_offered = packet[BOOTP].yiaddr
for option in packet[DHCP].options:
if type(option) == tuple:
if option[0] == 'subnet_mask':
self.subnet_mask = option[1]
if option[0] == 'router':
self.router = option[1]
if option[0] == 'domain':
self.domain = option[1]
if option[0] == 'name_server':
self.name_server = option[1]
if option[0] == 'lease_time':
self.lease_time = option[1]
def parseACK(self, packet):
print "Parsing ACK"
packet.show()
# FIXME: check these fields match current ones?
#self.response_server_ip = packet[IP].src
#self.response_server_mac = packet[Ether].src
#self.server_id = packet[BOOTP].siaddr
#FIXME: xid has to match the initial xid
# packet[BOOTP].xid
# FIXME: chaddr has to match client_mac
# str2mac(packet[BOOTP].chaddr)
# FIXME: check if yiaddr match current client ip or requested ip
self.client_ip_offered = packet[BOOTP].yiaddr
#FIXME: check these options match offered ones?
for option in packet[DHCP].options:
if type(option) == tuple:
if option[0] == 'subnet_mask':
self.subnet_mask = option[1]
if option[0] == 'router':
self.router = option[1]
if option[0] == 'domain':
self.domain = option[1]
if option[0] == 'name_server':
self.name_server = option[1]
if option[0] == 'lease_time':
self.lease_time = option[1]
def isOffer(self, packet):
if DHCP in packet and DHCPTypes[packet[DHCP].options[0][1]] == 'offer':
return True
return False
def isNAK(self, packet):
if DHCP in packet and DHCPTypes[packet[DHCP].options[0][1]] == 'nak':
return True
return False
def isACK(self, packet):
if DHCP in packet and DHCPTypes[packet[DHCP].options[0][1]] == 'ack':
return True
return False
def sendDiscover(self):
packet = self.genDiscover()
self.track_history()
packet.show()
sendp(packet)
print "Sent discover"
def sendRequest(self):
packet = self.genRequest()
self.track_history()
packet.show()
sendp(packet)
print "Sent request"
def setAddr(self):
print "Setting address"
#FIXME: subprocess.call to really set ip, route, nameserver
set_ip = "ip addr add local %s netmask %s dev %s" % \
(self.client_ip_offered, self.subnet_mask, self.iface)
set_route = "route add default gw %s" % self.router
#FIXME: set nameserver with resolvconf if installed
print set_ip
print set_route
#subprocess.call([set_ip])
#subprocess.call([set_route])
def handleResponse(self, packet):
print "Handling response"
if self.isOffer(packet):
print "Offer detected"
self.parseOffer(packet)
self.sendRequest()
if self.isACK(packet):
print "ACK detected"
self.parseACK(packet)
self.setAddr()
self.state = BOUND_STATE
self.renew_time = self.lease_time * RENEW_TIME_ON_LEASE
self.rebind_time = self.lease_time * REBIND_TIME_ON_LEASE
print "Sleeping for % secs." % self.renew_time
sleep(self.renew_time)
self.state = RENEW_STATE
self.sendRequest()
if self.isNAK(packet):
print "NAK detected"
# FIXME: implement
def getResponse(self, timeout=3, tries=1):
#FIXME: server_port is src and client_port is dst
sniff(filter="udp and (port %s or %s)" % \
(self.server_port, self.client_port),
prn=self.handleResponse, store=0, iface=conf.iface)
def main():
# FIXME: add support for several ifaces
parser = argparse.ArgumentParser()
parser.add_argument('interface', nargs='?',
help='interface to configure with DHCP' )
args = parser.parse_args()
if not args.interface:
args.interface = 'wlan0'
conf.iface = args.interface
conf.checkIPaddr = False
conf.verb = False
c = DHCPv4Client(args.interface)
#FIXME: if interface already has an address, send request with that address
# instead of discover?
c.sendDiscover()
c.getResponse()
print c
if __name__ == "__main__":
main()
| duy/dhcpscapy | scripts/dhcpclientscapy.py | Python | agpl-3.0 | 11,570 |
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
from pyramid.view import view_config
from dace.processinstance.core import DEFAULTMAPPING_ACTIONS_VIEWS
from pontus.view import BasicView
from pontus.form import FormView
from pontus.default_behavior import Cancel
from pontus.view_operation import MultipleView
from pontus.schema import select
from lac.content.processes.services_processes.behaviors import (
RenewSellingTicketsService)
from lac.content.service import (
SellingTicketsServiceSchema, SellingTicketsService)
from lac import _
class RenewSellingTicketsServiceViewStudyReport(BasicView):
title = 'Alert for renew'
name = 'alertforrenew'
template = 'lac:views/services_processes/selling_tickets_service/templates/alert_renew.pt'
def update(self):
result = {}
values = {'context': self.context}
body = self.content(args=values, template=self.template)['body']
item = self.adapt_item(body, self.viewid)
result['coordinates'] = {self.coordinates: [item]}
return result
class RenewSellingTicketsServiceView(FormView):
title = _('Renew the selling tickets service')
schema = select(SellingTicketsServiceSchema(factory=SellingTicketsService,
editable=True),
['title'])
behaviors = [RenewSellingTicketsService, Cancel]
formid = 'formrenewsellingticketsservice'
name = 'renewsellingticketsservice'
validate_behaviors = False
def default_data(self):
return self.context
@view_config(
name='renewsellingticketsservice',
context=SellingTicketsService,
renderer='pontus:templates/views_templates/grid.pt',
)
class RenewSellingTicketsServiceViewMultipleView(MultipleView):
title = _('Renew the selling tickets service')
name = 'renewsellingticketsservice'
viewid = 'renewsellingticketsservice'
template = 'daceui:templates/simple_mergedmultipleview.pt'
views = (RenewSellingTicketsServiceViewStudyReport,
RenewSellingTicketsServiceView)
validators = [RenewSellingTicketsService.get_validator()]
DEFAULTMAPPING_ACTIONS_VIEWS.update(
{RenewSellingTicketsService: RenewSellingTicketsServiceViewMultipleView})
| ecreall/lagendacommun | lac/views/services_processes/selling_tickets_service/renew_service.py | Python | agpl-3.0 | 2,351 |
# -*- coding: utf-8 -*-
# © 2016 Comunitea - Javier Colmenero <javier@comunitea.com>
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import res_partner
import account
| Comunitea/CMNT_00098_2017_JIM_addons | partner_consolidate/models/__init__.py | Python | agpl-3.0 | 184 |
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo.tests.common import HttpCase
class UICase(HttpCase):
def test_ui_website(self):
"""Test frontend tour."""
tour = "website_sale_product_brand"
self.phantom_js(
url_path="/shop",
code="odoo.__DEBUG__.services['web_tour.tour']"
".run('%s')" % tour,
ready="odoo.__DEBUG__.services['web_tour.tour']"
".tours.%s.ready" % tour)
| Vauxoo/e-commerce | website_sale_product_brand/tests/test_ui.py | Python | agpl-3.0 | 499 |
# -*- coding: utf-8 -*-
#
##################################################################################
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
import os
import osrframework.utils.configuration as configuration
import osrframework.utils.general as general
class OSRFrameworkException(Exception):
"""
Generic OSRFramework Exception
It will be used to show warnings, i. e., any operation which throws an
exception but which does not stop OSRFramework from running.
Messages will be printed as warnings, in orange.
"""
def __init__(self, msg, *args, **kwargs):
Exception.__init__(self, general.warning(msg))
self.generic = "Generic OSRFramework exception."
class NoCredentialsException(OSRFrameworkException):
def __init__(self, platform, *args, **kwargs):
msg = """
[*] Warning:\t{}. Details:
No valid credentials provided for '{}'.
Update the configuration file at: '{}'.
""".format(
self.__class__.__name__,
platform,
os.path.join(configuration.getConfigPath()["appPath"], "accounts.cfg"),
general.emphasis("-x " + platform)
)
OSRFrameworkException.__init__(self, general.warning(msg))
self.generic = "The credentials for some platforms where NOT provided."
class OSRFrameworkError(Exception):
"""
Generic OSRFramework Error
It will be used to show errors, i. e., any operation which throws an error
from which OSRFramework cannot get recovered.
Messages will be printed as errors, in red.
"""
def __init__(self, msg, *args, **kwargs):
Exception.__init__(self, "{}".format(general.error(msg)))
self.generic = "Generic OSRFramework error."
class NotImplementedModeError(OSRFrameworkError):
def __init__(self, platform, mode, *args, **kwargs):
msg = """
[*] Error:\t{}. Details:
The '{}' wrapper has tried to call 'self.do_{}(...)'.
The method seems to be implemented incorrectly or not implemented at all.""".format(
self.__class__.__name__,
platform,
mode
)
OSRFrameworkError.__init__(self, msg)
self.generic = "A wrapper has tried to launch a mode which is not yet implemented. This error should not be happening unless you have added a new method out of the standard ones for mailfy, phonefy, searchfy or usufy."
class BadImplementationError(OSRFrameworkError):
def __init__(self, original_message, *args, **kwargs):
msg = """
[*] Error:\t{}. Details:
{}.
{}""".format(
self.__class__.__name__,
original_message,
"The wrapper may be missing an attribute like self.creds empty list in its constructor."
)
OSRFrameworkError.__init__(self, msg)
self.generic = "A wrapper has launched an unexpected implementation error."
| i3visio/osrframework | osrframework/utils/exceptions.py | Python | agpl-3.0 | 3,670 |
# This file is part of Wolnelektury, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See NOTICE for more information.
#
__author__ = 'Marek Stępniowski, <marek@stepniowski.com>'
__version__ = '0.1'
| fnp/wolnelektury | src/sponsors/__init__.py | Python | agpl-3.0 | 236 |
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from functools import wraps
TRANSIENT_USER_TYPES = []
def is_transient_user(user):
return isinstance(user, tuple(TRANSIENT_USER_TYPES))
def prevent_access_to_transient_users(view_func):
def _wrapped_view(request, *args, **kwargs):
'''Redirect transient users away instead of serving the view'''
if is_transient_user(request.user):
return HttpResponseRedirect('/')
return view_func(request, *args, **kwargs)
return login_required(wraps(view_func)(_wrapped_view))
def to_list(func):
@wraps(func)
def f(*args, **kwargs):
return list(func(*args, **kwargs))
return f
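# hypothetical usage sketch for to_list: materialize a generator-returning
# helper so callers can take len() of the result or iterate it twice:
#
# @to_list
# def pks(qs):
#     return (obj.pk for obj in qs)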
| pu239ppy/authentic2 | authentic2/decorators.py | Python | agpl-3.0 | 764 |
import analytic
| dreispt/department | analytic_department/__init__.py | Python | agpl-3.0 | 16 |
"""add deleted bool to things
Revision ID: 4d8d18f311aa
Revises: 4e34b4290fbc
Create Date: 2019-02-27 21:31:09.575521
"""
# revision identifiers, used by Alembic.
revision = '4d8d18f311aa'
down_revision = '4e34b4290fbc'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('content_musicplaylist', sa.Column('deleted', sa.Boolean(), nullable=True))
op.add_column('content_podcast', sa.Column('deleted', sa.Boolean(), nullable=True))
op.add_column('content_stream', sa.Column('deleted', sa.Boolean(), nullable=True))
op.add_column('content_track', sa.Column('deleted', sa.Boolean(), nullable=True))
op.add_column('radio_person', sa.Column('deleted', sa.Boolean(), nullable=True))
op.add_column('radio_program', sa.Column('deleted', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('radio_program', 'deleted')
op.drop_column('radio_person', 'deleted')
op.drop_column('content_track', 'deleted')
op.drop_column('content_stream', 'deleted')
op.drop_column('content_podcast', 'deleted')
op.drop_column('content_musicplaylist', 'deleted')
### end Alembic commands ###
| rootio/rootio_web | alembic/versions/4d8d18f311aa_add_deleted_bool_to_.py | Python | agpl-3.0 | 1,319 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Trial.max_participants'
db.delete_column(u'trials_trial', 'max_participants')
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Trial.max_participants'
raise RuntimeError("Cannot reverse this migration. 'Trial.max_participants' and its values cannot be restored.")
models = {
u'trials.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"})
},
u'trials.invitation': {
'Meta': {'object_name': 'Invitation'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"})
},
u'trials.participant': {
'Meta': {'object_name': 'Participant'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['userprofiles.RMUser']", 'null': 'True', 'blank': 'True'})
},
u'trials.report': {
'Meta': {'object_name': 'Report'},
'binary': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'participant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Participant']", 'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}),
'variable': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Variable']"})
},
u'trials.trial': {
'Meta': {'object_name': 'Trial'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finish_date': ('django.db.models.fields.DateField', [], {}),
'group_a': ('django.db.models.fields.TextField', [], {}),
'group_a_desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group_a_expected': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_b': ('django.db.models.fields.TextField', [], {}),
'group_b_desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group_b_impressed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instruction_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'instruction_delivery': ('django.db.models.fields.TextField', [], {'default': "'im'", 'max_length': '2'}),
'instruction_hours_after': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'min_participants': ('django.db.models.fields.IntegerField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['userprofiles.RMUser']"}),
'participants': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'recruiting': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recruitment': ('django.db.models.fields.CharField', [], {'default': "'an'", 'max_length': '2'}),
'reporting_freq': ('django.db.models.fields.CharField', [], {'default': "'da'", 'max_length': '200'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'stopped': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'trials.variable': {
'Meta': {'object_name': 'Variable'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"})
},
u'userprofiles.rmuser': {
'Meta': {'object_name': 'RMUser'},
'account': ('django.db.models.fields.CharField', [], {'default': "'st'", 'max_length': '2'}),
'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'receive_questions': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'})
}
}
complete_apps = ['trials']
| openhealthcare/randomise.me | rm/trials/migrations/0035_auto__del_field_trial_max_participants.py | Python | agpl-3.0 | 7,643 |
# -*- coding: utf-8 -*-
from flask import (Blueprint, redirect, url_for, render_template, flash,
request, abort, send_file, current_app)
from flask_babel import gettext
from sqlalchemy.orm.exc import NoResultFound
import crypto_util
import store
from db import db_session, Submission
from journalist_app.decorators import login_required
from journalist_app.forms import ReplyForm
from journalist_app.utils import (make_star_true, make_star_false, get_source,
delete_collection, col_download_unread,
col_download_all, col_star, col_un_star,
col_delete)
def make_blueprint(config):
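# blueprint factory pattern: views close over a fresh Blueprint instance so
# the app can build it per configuration (config itself is unused here)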
view = Blueprint('col', __name__)
@view.route('/add_star/<filesystem_id>', methods=('POST',))
@login_required
def add_star(filesystem_id):
make_star_true(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route("/remove_star/<filesystem_id>", methods=('POST',))
@login_required
def remove_star(filesystem_id):
make_star_false(filesystem_id)
db_session.commit()
return redirect(url_for('main.index'))
@view.route('/<filesystem_id>')
@login_required
def col(filesystem_id):
form = ReplyForm()
source = get_source(filesystem_id)
source.has_key = crypto_util.getkey(filesystem_id)
return render_template("col.html", filesystem_id=filesystem_id,
source=source, form=form)
@view.route('/delete/<filesystem_id>', methods=('POST',))
@login_required
def delete_single(filesystem_id):
"""deleting a single collection from its /col page"""
source = get_source(filesystem_id)
delete_collection(filesystem_id)
flash(gettext("{source_name}'s collection deleted")
.format(source_name=source.journalist_designation),
"notification")
return redirect(url_for('main.index'))
@view.route('/process', methods=('POST',))
@login_required
def process():
actions = {'download-unread': col_download_unread,
'download-all': col_download_all, 'star': col_star,
'un-star': col_un_star, 'delete': col_delete}
if 'cols_selected' not in request.form:
flash(gettext('No collections selected.'), 'error')
return redirect(url_for('main.index'))
# request.form is a werkzeug MultiDict; getlist returns every value for the key
cols_selected = request.form.getlist('cols_selected')
action = request.form['action']
if action not in actions:
return abort(500)
method = actions[action]
return method(cols_selected)
@view.route('/<filesystem_id>/<fn>')
@login_required
def download_single_submission(filesystem_id, fn):
"""Sends a client the contents of a single submission."""
if '..' in fn or fn.startswith('/'):
abort(404)
try:
Submission.query.filter(
Submission.filename == fn).one().downloaded = True
db_session.commit()
except NoResultFound as e:
current_app.logger.error(
"Could not mark " + fn + " as downloaded: %s" % (e,))
return send_file(store.path(filesystem_id, fn),
mimetype="application/pgp-encrypted")
return view
| micahflee/securedrop | securedrop/journalist_app/col.py | Python | agpl-3.0 | 3,437 |
#! /usr/bin/env python
#
# Copyright (c) 2008-2009 University of Utah and the Flux Group.
#
# {{{GENIPUBLIC-LICENSE
#
# GENI Public License
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#
# }}}
#
#
#
import sys
import pwd
import getopt
import os
import time
import re
import xmlrpclib
from M2Crypto import X509
ACCEPTSLICENAME=1
execfile( "test-common.py" )
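# test-common.py (pulled in via execfile) supplies the helpers used below:
# get_self_credential, resolve_slice, get_slice_credential, do_method, Fatal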
#
# Get a credential for myself, that allows me to do things at the SA.
#
mycredential = get_self_credential()
print "Got self credential"
#
# Lookup slice.
#
myslice = resolve_slice( SLICENAME, mycredential )
print "Resolved slice " + SLICENAME
#
# Get the slice credential.
#
slicecredential = get_slice_credential( myslice, mycredential )
print "Got slice credential"
#
# Bind to slice at the CM.
#
params = {}
params["credential"] = slicecredential
rval,response = do_method("cm", "BindToSlice", params)
if rval:
Fatal("Could not bind myself to slice")
pass
print "Bound myself to slice"
| nmc-probe/emulab-nome | protogeni/test/version1/binduser.py | Python | agpl-3.0 | 1,946 |
from django.contrib.auth.models import User
from pandas import read_csv, notnull, DataFrame
from numpy import isnan
from django.test import TestCase
from rhizome.models.campaign_models import Campaign, CampaignType, \
DataPointComputed, AggDataPoint
from rhizome.models.location_models import Location, LocationType, \
LocationTree
from rhizome.models.indicator_models import Indicator, IndicatorTag, \
IndicatorToTag, CalculatedIndicatorComponent
from rhizome.models.document_models import Document, SourceSubmission
from rhizome.models.datapoint_models import DataPoint
from rhizome.cache_meta import LocationTreeCache
from rhizome.tests.setup_helpers import TestSetupHelpers
class AggRefreshTestCase(TestCase):
'''
'''
def __init__(self, *args, **kwargs):
super(AggRefreshTestCase, self).__init__(*args, **kwargs)
def setUp(self):
self.ts = TestSetupHelpers()
data_df = read_csv('rhizome/tests/_data/calc_data.csv')
self.create_metadata()
self.user = User.objects.get(username="test")
self.test_df = data_df[data_df['is_raw'] == 1]
self.target_df = data_df[data_df['is_raw'] == 0]
self.campaign_id = Campaign.objects.all()[0].id
self.top_lvl_location = Location.objects.filter(name='Nigeria')[0]
ltr = LocationTreeCache()
ltr.main()
def create_metadata(self):
'''
Creating the Indicator, location, Campaign, meta data needed for the
system to aggregate / calculate.
'''
read_csv('rhizome/tests/_data/campaigns.csv')
location_df = read_csv('rhizome/tests/_data/locations.csv')
indicator_df = read_csv('rhizome/tests/_data/indicators.csv')
user_id = User.objects.create_user('test', 'john@john.com', 'test').id
self.location_type1 = LocationType.objects.create(admin_level=0,
name="country", id=1)
self.location_type2 = LocationType.objects.create(admin_level=1,
name="province", id=2)
campaign_type1 = CampaignType.objects.create(name='test')
self.locations = self.model_df_to_data(location_df, Location)
self.indicators = self.model_df_to_data(indicator_df, Indicator)
ind_tag = IndicatorTag.objects.create(tag_name='Polio')
sub_tag = IndicatorTag.objects.create(tag_name='Polio Management',
parent_tag_id=ind_tag.id)
ind_to_tag_batch = [IndicatorToTag(
**{'indicator_tag_id': sub_tag.id, 'indicator_id': ind.id}) for ind in self.indicators]
IndicatorToTag.objects.bulk_create(ind_to_tag_batch)
self.campaign_id = Campaign.objects.create(
start_date='2016-01-01',
end_date='2016-01-02',
campaign_type_id=campaign_type1.id
).id
document = Document.objects.create(
doc_title='test',
created_by_id=user_id,
guid='test')
self.ss = SourceSubmission.objects.create(
document_id=document.id,
submission_json='',
row_number=0,
data_date='2016-01-01'
).id
def model_df_to_data(self, model_df, model):
meta_ids = []
non_null_df = model_df.where((notnull(model_df)), None)
list_of_dicts = non_null_df.transpose().to_dict()
for row_ix, row_dict in list_of_dicts.iteritems():
row_id = model.objects.create(**row_dict)
meta_ids.append(row_id)
return meta_ids
def create_raw_datapoints(self):
for row_ix, row_data in self.test_df.iterrows():
dp_id = self.create_datapoint(row_data.location_id, row_data.data_date,
row_data.indicator_id, row_data.value)
# def create_datapoint(self, **kwargs):
def create_datapoint(self, location_id, data_date, indicator_id, value):
'''
Right now this is being performed as a database insert. I would like to
Test this against the data entry resource, but this will do for now
in order to test caching.
'''
document_id = Document.objects.get(doc_title='test').id
ss_id = SourceSubmission.objects.get(document_id=document_id).id
dp = DataPoint.objects.create(
location_id=location_id,
data_date=data_date,
indicator_id=indicator_id,
campaign_id=self.campaign_id,
value=value,
source_submission_id=ss_id,
unique_index=str(location_id) + str(data_date) +
str(self.campaign_id) + str(indicator_id)
)
return dp
def test_location_aggregation(self):
'''
Using the calc_data.csv, create a test_df and target_df. Ensure that
the aggregation and calcuation are working properly, but ingesting the
stored data, running the cache, and checking that the calculated data
for the aggregate location (parent location, in this case Nigeria) is as
expected.
In addition to the datapoints in the test file, I insert a null value
to ensure that a null won't corrupt the calculation.
python manage.py test rhizome.tests.test_agg.AggRefreshTestCase.
test_location_aggregation --settings=rhizome.settings.test
'''
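# the assertions below check four things: raw values pass through to
# AggDataPoint unchanged, child locations sum up to the parent, a raw value
# stored at the parent overrides the aggregated sum, and pct indicators are
# never aggregated up the location tree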
self.create_raw_datapoints()
indicator_id, data_date, raw_location_id,\
agg_location_id, null_location_id, NaN_location_id = \
22, '2016-01-01', 12910, 12907, 12928, 12913
location_ids = Location.objects.filter(
parent_location_id=agg_location_id).values_list('id', flat=True)
DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id=null_location_id
).update(value=None)
DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id=NaN_location_id
).update(value='NaN')
dps = DataPoint.objects.filter(
indicator_id=indicator_id,
# data_date = data_date,
location_id__in=location_ids,
value__isnull=False
).values_list('id', 'value')
sum_dp_value = sum([y for x, y in dps if not isnan(y)])
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
#################################################
## ensure that raw data gets into AggDataPoint ##
#################################################
raw_value = DataPoint.objects.get(
# data_date = data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
ind_obj = Indicator.objects.get(id=indicator_id)
raw_value_in_agg = AggDataPoint.objects.get(
# data_date = data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
self.assertEqual(raw_value, raw_value_in_agg)
#############################################
## ensure that the aggregated data gets in ##
#############################################
loc_tree_df = DataFrame(list(LocationTree.objects.all().values()))
agg_df = DataFrame(list(AggDataPoint.objects.filter(\
indicator_id=indicator_id,\
campaign_id=self.campaign_id
).values()))
agg_value = AggDataPoint.objects.get(
indicator_id=indicator_id,
campaign_id=self.campaign_id,
location_id=agg_location_id
).value
self.assertEqual(agg_value, sum_dp_value)
######################################################
## ensure that any raw data will override aggregate ##
######################################################
override_value = 909090
agg_override_dp = self.create_datapoint(agg_location_id, data_date,
indicator_id, override_value)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
override_value_in_agg = AggDataPoint.objects.get(
campaign_id=self.campaign_id,
indicator_id=indicator_id,
location_id=agg_location_id).value
self.assertEqual(override_value, override_value_in_agg)
###########################################
# ensure that percentages do not aggregate
###########################################
pct_ind = Indicator.objects.create(
name='pct missed',
short_name='pct_missed',
description='missed pct',
data_format='pct',
source_name='my brain',
)
dp_1 = DataPoint.objects.create(
indicator_id=pct_ind.id,
location_id=location_ids[0],
campaign_id=self.campaign_id,
data_date=data_date,
value=.2,
source_submission_id=self.ss,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=pct_ind.id,
location_id=location_ids[1],
campaign_id=self.campaign_id,
data_date=data_date,
value=.6,
source_submission_id=self.ss,
unique_index=2
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
try:
agg_dp_qs = AggDataPoint.objects.get(
location_id=agg_location_id,
indicator_id=pct_ind,
campaign_id=self.campaign_id,
)
error_occurred = False
except AggDataPoint.DoesNotExist:
error_occurred = True
self.assertTrue(error_occurred)
def test_raw_data_to_computed(self):
'''
This just makes sure that any data in the datapoint table gets into the
Calculated DataPoint table. That is, if I insert a value for missed
children in Borno, the exact same data should be in the
datapoint_with_computed table no matter what.
'''
self.create_raw_datapoints()
indicator_id, data_date, raw_location_id,\
agg_location_id, campaign_id = 22, '2016-01-01', 12910, 12907, 1
location_ids = Location.objects.filter(
parent_location_id=agg_location_id).values_list('id', flat=True)
dp_values = DataPoint.objects.filter(
indicator_id=indicator_id,
data_date=data_date,
location_id__in=location_ids
).values_list('value', flat=True)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
############################################################
## ensure that raw data gets into datapoint_with_computed ##
############################################################
raw_value = DataPoint.objects.get(data_date=data_date,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
raw_value_in_agg = DataPointComputed.objects.get(
campaign_id=self.campaign_id,
indicator_id=indicator_id,
location_id=raw_location_id)\
.value
self.assertEqual(raw_value, raw_value_in_agg)
def test_sum_and_pct(self):
'''
The system uses the "PART_TO_BE_SUMMED" edge type in order to create
indicators such that the sum of:
- Number Missed
- Missed due to other reasons(24)
- Child Absent(251)
- Not in Plan (267)
- Not Visited (268)
- Non Compliance(264)
gives us: All Missed Children (21)
as well as: pct missed children due to refusal (166)
Here we create new metadata so we can test this functionality for an
Abstracted use case and test that
1. We can SUM indicators
2. We can use the result of #2 as the denominator for a percentage
calculation.
'''
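# worked example with the values inserted below: the SUM indicator should
# come out to 303 + 808 + 909 = 2020, and the pct indicator to 909 / 2020.0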
Indicator.objects.all().delete()
data_date, location_id, agg_location_id = '2016-01-01', 12910, 12907
val_1, val_2, val_3 = 303, 808, 909
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Number of Avoidable Deaths',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number of Deaths due to Conflict',
short_name='Number of Deaths due to Conflict',
data_format='int'
)
sub_indicator_2 = Indicator.objects.create(
name='Number of Deaths due to Malaria',
short_name='Number of Deaths due to Malaria',
data_format='int'
)
sub_indicator_3 = Indicator.objects.create(
name='Number of Deaths due to Hunger',
short_name='Number of Deaths due to Hunger',
data_format='int'
)
pct_indicator = Indicator.objects.create(
name='pct of Deaths due to Hunger',
short_name='pct of Deaths due to Hunger',
data_format='pct'
)
## FOR SUM OF PARTS CALUCLATIONS ##
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
## FOR PART OVER WHOLE CALCULATIONS ##
indicator_calc_numerator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='NUMERATOR'
)
indicator_calc_denominator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=parent_indicator.id,
calculation='DENOMINATOR'
)
ss_id = SourceSubmission.objects.all()[0].id
## create the datapoints ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_1,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=sub_indicator_2.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_2,
source_submission_id=ss_id,
unique_index=2
)
dp_3 = DataPoint.objects.create(
indicator_id=sub_indicator_3.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_3,
source_submission_id=ss_id,
unique_index=3
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value_sum = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
calc_value_pct = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
# test SUM calculation
sum_target_value = val_1 + val_2 + val_3
self.assertEqual(calc_value_sum, sum_target_value)
# test part over whole calction
pct_target_value = val_3 / float(sum_target_value)
self.assertEqual(calc_value_pct, pct_target_value)
def test_part_of_difference(self):
'''
see here: rhizome.work/manage_system/manage/indicator/187
We use this calculation to perform the following calculation:
WHOLE_OF_DIFFERENCE(x) - PART_OF_DIFFERENCE(y)
-----------------------------------------
WHOLE_OF_DIFFERENCE(x)
'''
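# worked example from the values below: target = (x - y) / x = (303 - 808) / 303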
data_date, location_id, agg_location_id = '2016-01-01', 12910, 12907
x, y = 303.00, 808.00
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Refusal Conversion',
short_name='Refusal Conversion',
data_format='pct'
)
sub_indicator_part = Indicator.objects.create(
name='Refusals After Revisit',
short_name='Refusals After Revisit',
data_format='int'
)
sub_indicator_denom = Indicator.objects.create(
name='Refusals Before Revisit',
short_name='Refusals Before Revisit',
data_format='int'
)
## FOR SUM OF PARTS CALUCLATIONS ##
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_part.id,
calculation='PART_OF_DIFFERENCE'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_denom.id,
calculation='WHOLE_OF_DIFFERENCE'
)
ss_id = SourceSubmission.objects.all()[0].id
## create the datapoints ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_denom.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=x,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=sub_indicator_part.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=y,
source_submission_id=ss_id,
unique_index=2
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
# test SUM calculation
target_value = (x - y) / x
self.assertEqual(round(calc_value, 4), round(target_value, 4))
def test_missing_part_of_sum(self):
data_date, location_id, agg_location_id = '2016-01-01', 12910, 12907
val_1, val_2 = 101, 102
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Number of Missing Children',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number Missing Due to Refusal',
short_name='Number Missing Due to Refusal',
data_format='int'
)
sub_indicator_2 = Indicator.objects.create(
name='Number Missing Due to Absence',
short_name='Number Missing Due to Absence',
data_format='int'
)
sub_indicator_3 = Indicator.objects.create(
name='Number Missing Due to ??',
short_name='Number Missing Due to ??',
data_format='int'
)
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
ss_id = SourceSubmission.objects.all()[0].id
## create the datapoints. We're only adding data points for ##
## two of the three datapoints that are mapped as parts to be summed ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_1,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=sub_indicator_2.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_2,
source_submission_id=ss_id,
unique_index=2
)
campaign_object = Campaign.objects.get(id = self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value_sum = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
sum_target_value = val_1 + val_2
self.assertEqual(calc_value_sum, sum_target_value)
def test_recursive_sum(self):
'''
Consider the case in which we have "number of missed children" which is
the sum of "missed children due to absence", "missed children due to
refusal", and "missed children due to child absence."
Now consider that "missed children due to refusal" is also generated
from the sum of "refusal due to religious reasons", "refusal due to
too many rounds", "refusal due to - unhappy with team " (see more here:
http://rhizome.work/manage_system/manage/indicator/264).
There are two levels here and this test aims to cover this use case.
'''
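# two-level calculation tree exercised below:
# avoidable deaths
#   conflict deaths  <- children + adult civilians + militants
#   malaria deaths   <- no net + no medicine
#   hunger deaths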
data_date, location_id = '2016-01-01', 12910
Indicator.objects.all().delete()
parent_indicator = Indicator.objects.create(
name='Number of Avoidable Deaths',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number of Deaths due to Conflict',
short_name='Number of Deaths due to Conflict',
data_format='int'
)
sub_sub_indicator_1 = Indicator.objects.create(
name='Number Conflict Deaths - Children',
short_name='Conflict Deaths - Children',
data_format='int'
)
sub_sub_indicator_2 = Indicator.objects.create(
name='Number of Adult Civilian Deaths',
short_name='Number of Adult Civilian Deaths',
data_format='int'
)
sub_sub_indicator_3 = Indicator.objects.create(
name='Number of Conflict Deaths - Militants',
short_name='Conflict Deaths - Militants',
data_format='int'
)
sub_indicator_2 = Indicator.objects.create(
name='Number of Deaths due to Malaria',
short_name='Number of Deaths due to Malaria',
data_format='int'
)
sub_indicator_2_sub_1 = Indicator.objects.create(
name='Number of Deaths due to Malaria -- Child had No Net',
short_name='Number of Deaths due to Malaria -- no net',
data_format='int'
)
sub_indicator_2_sub_2 = Indicator.objects.create(
name='Number of Deaths due to Malaria -- Child had No Medicine',
short_name='Number of Deaths due to Malaria -- no Medicine',
data_format='int'
)
sub_indicator_3 = Indicator.objects.create(
name='Number of Deaths due to Hunger',
short_name='Number of Deaths due to Hunger',
data_format='int'
)
## FOR SUM OF PARTS CALCULATIONS ##
indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=parent_indicator.id,
indicator_component_id=sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
## 2nd layer of indicator calculation (for sub_indicator_1) ##
sub_indicator_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_1.id,
indicator_component_id=sub_sub_indicator_1.id,
calculation='PART_TO_BE_SUMMED'
)
sub_indicator_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_1.id,
indicator_component_id=sub_sub_indicator_2.id,
calculation='PART_TO_BE_SUMMED'
)
sub_indicator_calc_3 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_1.id,
indicator_component_id=sub_sub_indicator_3.id,
calculation='PART_TO_BE_SUMMED'
)
## 2nd layer of indicator calculation (for sub_indicator_2) ##
sub_indicator_2_calc_1 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_2.id,
indicator_component_id=sub_indicator_2_sub_1.id,
calculation='PART_TO_BE_SUMMED'
)
sub_indicator_2_calc_2 = CalculatedIndicatorComponent.objects.create(
indicator_id=sub_indicator_2.id,
indicator_component_id=sub_indicator_2_sub_2.id,
calculation='PART_TO_BE_SUMMED'
)
## create all the datapoints ##
values_to_insert = {
sub_indicator_2.id: 33,
sub_indicator_3.id: 44,
sub_sub_indicator_1.id: 44,
sub_sub_indicator_2.id: 55,
sub_sub_indicator_3.id: 66,
sub_indicator_2_sub_1.id: 77,
sub_indicator_2_sub_2.id: 88,
}
for k, v in values_to_insert.iteritems():
self.create_datapoint(location_id, data_date, k, v)
campaign_object = Campaign.objects.get(id=self.campaign_id)
campaign_object.aggregate_and_calculate()
parent_indicator_target_value = sum(values_to_insert.values())
parent_indicator_1_actual_value = DataPointComputed.objects.get(
location_id=location_id,
indicator_id=parent_indicator.id,
).value
self.assertEqual(parent_indicator_1_actual_value,
parent_indicator_target_value)
# test that a parent's stored value overrides the sum of its children when
# there are multiple levels of indicator calculations: sub_indicator_2 has
# its own datapoint (33), so its computed value should be 33 rather than
# the sum of its children (77 + 88)
sub_2_target_val = values_to_insert[sub_indicator_2.id]
sub_2_actual_val = DataPointComputed.objects.get(
location_id=location_id,
indicator_id=sub_indicator_2.id,
).value
self.assertEqual(sub_2_target_val, sub_2_actual_val)
def test_boolean_aggregation(self):
# create a boolean indicator
boolean_indicator = Indicator.objects.create(
name='Is Controlled by "Anti Government Elements"',
short_name='Is at War',
data_format='bool'
)
# find the locations for which we should store raw data. For instance,
# if it is 'district is at war', then we don't expect data stored at
# the province level. Here, though, we get all children of a particular
# parent.
locations = Location.objects.filter(
parent_location_id=self.top_lvl_location.id)
# split the data so that one location's value is false and the rest are
# true. The parent location should then have a value of (n - 1) / n,
# where n is the number of child locations.
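# worked example (assuming, say, 5 child locations): 4 of the 5 report
# true, so the parent's aggregated value is 4 / 5 = 0.8, which is exactly
# 1 - (1.0 / len(locations)) as asserted at the end of this test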
false_loc_id = locations[0].id
true_loc_list = locations[1:]
## create the true and false datapoints ##
false_datapoint = DataPoint.objects.create(
campaign_id=self.campaign_id,
location_id=false_loc_id,
indicator_id=boolean_indicator.id,
source_submission_id=self.ss,
value=0
)
true_datapoint_batch = [DataPoint(**{
'campaign_id': self.campaign_id,
'location_id': loc.id,
'indicator_id': boolean_indicator.id,
'source_submission_id': self.ss,
'value': 1,
'unique_index': str(self.campaign_id) + str(boolean_indicator.id) + str(loc.id)
}) for loc in true_loc_list]
DataPoint.objects.bulk_create(true_datapoint_batch)
# run the agg refresh (this is the code that actually transforms
# the booleans to numerics)
campaign_object = Campaign.objects.get(id=self.campaign_id)
campaign_object.aggregate_and_calculate()
# now get the expected aggregated data and compare it with the percentage
# value that we expect given how we split up the locations above.
dwc_value = DataPointComputed.objects.get(
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
indicator_id=boolean_indicator.id
).value
expected_value = 1 - (1.0 / len(locations))
self.assertEqual(expected_value, dwc_value)
def test_calculated_indicator_agg(self):
Indicator.objects.all().delete()
data_date, agg_location_id = '2016-01-01', 12907
child_locations = Location.objects.filter(
parent_location_id=agg_location_id)
location_id = child_locations[0].id
location_id_2 = child_locations[1].id
## create the parent and sub indicators ##
parent_indicator = Indicator.objects.create(
name='Number of Avoidable Deaths',
short_name='Number of Avoidable Deaths',
data_format='int'
)
sub_indicator_1 = Indicator.objects.create(
name='Number of Deaths due to Conflict',
short_name='Number of Deaths due to Conflict',
data_format='int'
)
pct_indicator = Indicator.objects.create(
name='pct of Deaths due to Conflict',
short_name='pct of Deaths due to Conflict',
data_format='pct'
)
## FOR PART OVER WHOLE CALCULATIONS ##
indicator_calc_numerator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=sub_indicator_1.id,
calculation='NUMERATOR'
)
indicator_calc_denominator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_indicator.id,
indicator_component_id=parent_indicator.id,
calculation='DENOMINATOR'
)
val_1 = 32
val_2 = 100
val_1_loc_2 = 48
val_2_loc_2 = 200
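# with these figures the expected calculations are:
# location 1: 32 / 100 = 0.32
# location 2: 48 / 200 = 0.24
# parent: (32 + 48) / (100 + 200) = 80 / 300 = 0.26667 (rounded)
# i.e. the pct aggregates from the summed parts, not from the average of
# the child percentages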
ss_id = SourceSubmission.objects.all()[0].id
## create the datapoints ##
dp_1 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_1,
source_submission_id=ss_id,
unique_index=1
)
dp_2 = DataPoint.objects.create(
indicator_id=parent_indicator.id,
data_date=data_date,
location_id=location_id,
campaign_id=self.campaign_id,
value=val_2,
source_submission_id=ss_id,
unique_index=2
)
dp_1_loc_2 = DataPoint.objects.create(
indicator_id=sub_indicator_1.id,
data_date=data_date,
location_id=location_id_2,
campaign_id=self.campaign_id,
value=val_1_loc_2,
source_submission_id=ss_id,
unique_index=3
)
dp_2_loc_2 = DataPoint.objects.create(
indicator_id=parent_indicator.id,
data_date=data_date,
location_id=location_id_2,
campaign_id=self.campaign_id,
value=val_2_loc_2,
source_submission_id=ss_id,
unique_index=4
)
campaign_object = Campaign.objects.get(id=self.campaign_id)
campaign_object.aggregate_and_calculate()
calc_value_pct = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id
).value
calc_value_pct_2 = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=location_id_2
).value
# test part over whole calculation for child locations
pct_target_value = val_1 / float(val_2)
self.assertEqual(calc_value_pct, pct_target_value)
pct_target_value_2 = val_1_loc_2 / float(val_2_loc_2)
self.assertEqual(calc_value_pct_2, pct_target_value_2)
# make sure that part over whole aggregates as well
total_dp = DataPointComputed.objects.get(
indicator_id=parent_indicator.id,
campaign_id=self.campaign_id,
location_id=agg_location_id).value
self.assertEqual(total_dp, val_2 + val_2_loc_2)
try:
pct_dp = DataPointComputed.objects.get(
indicator_id=pct_indicator.id,
campaign_id=self.campaign_id,
location_id=agg_location_id).value
except ObjectDoesNotExist:
fail("aggregation did not work")
self.assertEqual(round(pct_dp, 5), round(
(val_1 + val_1_loc_2) / float(val_2 + val_2_loc_2), 5))
def test_multiple_calculations(self):
num_seen = Indicator.objects.create(
name='number children seen',
short_name='number children seen',
data_format='int'
)
num_vacc = Indicator.objects.create(
name='number children vaccinated',
short_name='number children vaccinated',
data_format='int'
)
num_missed = Indicator.objects.create(
name='number children missed',
short_name='number children missed',
data_format='int'
)
pct_missed = Indicator.objects.create(
name='pct children missed',
short_name='pct children missed',
data_format='pct'
)
indicator_calc_numerator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_missed.id,
calculation='NUMERATOR'
)
indicator_calc_denominator = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_seen.id,
calculation='DENOMINATOR'
)
indicator_calc_part_of_diff = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_vacc.id,
calculation='PART_OF_DIFFERENCE'
)
indicator_calc_part_of_whole = CalculatedIndicatorComponent.objects.create(
indicator_id=pct_missed.id,
indicator_component_id=num_seen.id,
calculation='WHOLE_OF_DIFFERENCE'
)
num_missed_val = 45.0
num_seen_val = 100.0
num_vacc_val = 55.0
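# expected results for the scenarios exercised below (using the values
# above): numerator / denominator gives 45 / 100 = 0.45, and whole/part
# of difference gives (100 - 55) / 100 = 0.45. The calculations agree by
# construction, so every assertion below compares against the same value.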
ss_id = SourceSubmission.objects.all()[0].id
dp_num_missed = DataPoint.objects.create(
indicator_id=num_missed.id,
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
value=num_missed_val,
source_submission_id=ss_id,
unique_index=3
)
dp_num_seen = DataPoint.objects.create(
indicator_id=num_seen.id,
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
value=num_seen_val,
source_submission_id=ss_id,
unique_index=4
)
campaign_object = Campaign.objects.get(id=self.campaign_id)
campaign_object.aggregate_and_calculate()
# check that numerator and denominator option work
cdp_pct_missed_1 = DataPointComputed.objects.filter(
indicator_id=pct_missed.id)[0]
self.assertEqual(cdp_pct_missed_1.value,
num_missed_val / float(num_seen_val))
dp_num_vaccinated = DataPoint.objects.create(
indicator_id=num_vacc.id,
location_id=self.top_lvl_location.id,
campaign_id=self.campaign_id,
value=num_vacc_val,
source_submission_id=ss_id,
unique_index=5
)
campaign_object = Campaign.objects.get(id=self.campaign_id)
campaign_object.aggregate_and_calculate()
# check that this works when we can do whole/part of difference
cdp_pct_missed_2 = DataPointComputed.objects.filter(
indicator_id=pct_missed.id)[0]
# expected: 1.0 - (num_vacc_val / num_seen_val) = 1.0 - 0.55 = 0.45
self.assertEqual(cdp_pct_missed_2.value, 0.45)
# check that this works when we can only do whole/part of difference
DataPoint.objects.filter(indicator_id=num_missed.id).delete()
campaign_object = Campaign.objects.get(id=self.campaign_id)
campaign_object.aggregate_and_calculate()
cdp_pct_missed_3 = DataPointComputed.objects.filter(
indicator_id=pct_missed.id)[0]
self.assertEqual(cdp_pct_missed_3.value, 0.45)
| unicef/rhizome | rhizome/tests/test_agg.py | Python | agpl-3.0 | 38,994 |
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django import forms
from base.forms.common import set_trans_txt
from cms.models import translated_text
from cms.enums import entity_name
from ckeditor.widgets import CKEditorWidget
class LearningUnitSpecificationsForm(forms.Form):
learning_unit_year = language = None
def __init__(self, learning_unit_year, language, *args, **kwargs):
self.learning_unit_year = learning_unit_year
self.language = language
self.refresh_data()
super(LearningUnitSpecificationsForm, self).__init__(*args, **kwargs)
def refresh_data(self):
language_iso = self.language[0]
texts_list = translated_text.search(entity=entity_name.LEARNING_UNIT_YEAR,
reference=self.learning_unit_year.id,
language=language_iso) \
.exclude(text__isnull=True)
set_trans_txt(self, texts_list)
class LearningUnitSpecificationsEditForm(forms.Form):
trans_text = forms.CharField(widget=CKEditorWidget(config_name='minimal'), required=False)
cms_id = forms.IntegerField(widget=forms.HiddenInput, required=True)
def __init__(self, *args, **kwargs):
self.learning_unit_year = kwargs.pop('learning_unit_year', None)
self.language_iso = kwargs.pop('language', None)
self.text_label = kwargs.pop('text_label', None)
super(LearningUnitSpecificationsEditForm, self).__init__(*args, **kwargs)
def load_initial(self):
value = translated_text.get_or_create(entity=entity_name.LEARNING_UNIT_YEAR,
reference=self.learning_unit_year.id,
language=self.language_iso,
text_label=self.text_label)
self.fields['cms_id'].initial = value.id
self.fields['trans_text'].initial = value.text
def save(self):
cleaned_data = self.cleaned_data
trans_text = translated_text.find_by_id(cleaned_data['cms_id'])
trans_text.text = cleaned_data.get('trans_text')
trans_text.save()
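# A minimal usage sketch for the edit form (hypothetical view code; `luy`,
# `language_iso` and `label` are assumed to be available in the view):
#
#   form = LearningUnitSpecificationsEditForm(request.POST or None,
#                                             learning_unit_year=luy,
#                                             language=language_iso,
#                                             text_label=label)
#   if request.method == 'GET':
#       form.load_initial()
#   elif form.is_valid():
#       form.save()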
| uclouvain/osis_louvain | base/forms/learning_unit_specifications.py | Python | agpl-3.0 | 3,409 |
import datetime
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from simple_history.models import HistoricalRecords
from adesao.middleware import get_current_user
TIPOS_DILIGENCIA = (
('geral', 'Geral'),
('componente', 'Específica'),
)
LISTA_SITUACAO_ARQUIVO = (
(0, "Em preenchimento"),
(1, "Avaliando anexo"),
(2, "Concluída"),
(3, "Arquivo aprovado com ressalvas"),
(4, "Arquivo danificado"),
(5, "Arquivo incompleto"),
(6, "Arquivo incorreto"),
)
class Diligencia(models.Model):
texto_diligencia = models.TextField(max_length=200)
classificacao_arquivo = models.IntegerField(choices=LISTA_SITUACAO_ARQUIVO,
null=True, blank=True)
data_criacao = models.DateField(default=datetime.date.today)
# sistema_cultura = models.ForeignKey('adesao.SistemaCultura', on_delete=models.CASCADE,
# null=True, blank=True)
componente_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
componente_id = models.PositiveIntegerField()
componente = GenericForeignKey('componente_type', 'componente_id')
usuario = models.ForeignKey('adesao.Usuario', on_delete=models.CASCADE)
tipo_diligencia = models.CharField(
max_length=10,
choices=TIPOS_DILIGENCIA)
def __str__(self):
return str(self.id)
class ArquivoHistoricalDiligenciaSimples(models.Model):
arquivo_url = models.URLField(blank=True, null=True)
class Meta:
abstract = True
class DiligenciaSimples(models.Model):
texto_diligencia = models.TextField(max_length=200)
classificacao_arquivo = models.IntegerField(choices=LISTA_SITUACAO_ARQUIVO, null=True, blank=True)
data_criacao = models.DateField(default=datetime.date.today)
usuario = models.ForeignKey('adesao.Usuario', on_delete=models.CASCADE)
history = HistoricalRecords(bases=[ArquivoHistoricalDiligenciaSimples, ])
class Contato(models.Model):
contatado = models.CharField(max_length=100)
contatante = models.ForeignKey("adesao.Usuario", on_delete=models.CASCADE, related_name="contatos")
data = models.DateField()
discussao = models.TextField()
sistema_cultura = models.ForeignKey("adesao.SistemaCultura", on_delete=models.CASCADE, related_name="contatos")
def save(self, *args, **kwargs):
self.contatante = get_current_user()
super().save(*args, **kwargs)
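# Note that save() above unconditionally overwrites `contatante` with the
# request user captured by adesao.middleware.get_current_user, so a
# hypothetical call such as:
#
#   Contato.objects.create(contatado='Maria', data=date.today(),
#                          discussao='...', sistema_cultura=sc)
#
# records the currently logged-in user as contatante regardless of any
# value passed in.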
| culturagovbr/sistema-nacional-cultura | gestao/models.py | Python | agpl-3.0 | 2,555 |
# -*- encoding: utf-8 -*-
from openerp.osv import osv, fields
from idvalidator import is_ncf
from openerp.osv.osv import except_osv
from openerp import netsvc
from datetime import datetime
from openerp.tools.translate import _
import time
class account_invoice(osv.Model):
_inherit = "account.invoice"
_name = "account.invoice"
def _get_reference_type(self, cr, uid, context=None):
return [('none', u'Referencia libre / Nº Fact. Proveedor'),
('01', '01 - Gastos de personal'),
('02', '02 - Gastos por trabajo, suministros y servicios'),
('03', '03 - Arrendamientos'),
('04', '04 - Gastos de Activos Fijos'),
('05', u'05 - Gastos de Representación'),
('06', '06 - Otras Deducciones Admitidas'),
('07', '07 - Gastos Financieros'),
('08', '08 - Gastos Extraordinarios'),
('09', '09 - Compras y Gastos que forman parte del Costo de Venta'),
('10', '10 - Adquisiciones de Activos'),
('11', '11 - Gastos de Seguro')
]
def on_change_fiscal_position(self, cr, uid, ids, value):
fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, value).fiscal_type
if fiscal_type in [u'informal', u'minor']:
ncf_required = False
else:
ncf_required = True
return {"value": {'reference_type': fiscal_type, 'ncf_required': ncf_required}}
def onchange_journal_id(self, cr, uid, ids, *args):
if args:
journal = self.pool.get("account.journal").browse(cr, uid, args[0])
ncf_required = True
if journal.ncf_special:
ncf_required = False
return {"value": {'ncf_required': ncf_required}}
else:
return {"value": {}}
def onchange_reference(self, cr, uid, ids, reference, ncf_required):
if not is_ncf(reference.encode("ascii")) and ncf_required:
raise except_osv(u"NCF Invalido!", u"El NCF del proveedor no es válido!")
return False
def action_date_assign(self, cr, uid, ids, *args):
for inv in self.browse(cr, uid, ids):
if inv.journal_id.ncf_special in ['gasto', 'informal']:
self.write(cr, uid, [inv.id], {"reference": False})
if inv.type in ['in_invoice', 'in_refund'] and inv.ncf_required:
if inv.reference_type != 'none' and not is_ncf(inv.reference.encode("ascii")):
raise except_osv(u"NCF Invalido!", u"El NCF del proveedor no es válido!")
# TODO: for the incoming shipment related to this order, warn the accountant
# that they must finish receiving the pending products or cancel them; if
# the goods are received only partially, a draft credit note must be created
res = self.onchange_payment_term_date_invoice(cr, uid, inv.id, inv.payment_term.id, inv.date_invoice)
if res and res['value']:
self.write(cr, uid, [inv.id], res['value'])
return True
_columns = {
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=False),
'reference': fields.char('Invoice Reference', size=19, help="The partner reference of this invoice."),
'ipf': fields.boolean("Impreso", readonly=True),
'ncf_required': fields.boolean(),
"pay_to": fields.many2one("res.partner", "Pagar a")
}
_sql_constraints = [
# ('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!')
('number_uniq', 'unique(company_id, partner_id, number, journal_id)', u'El NCF para este relacionado ya fue utilizado!'),
]
_defaults = {
"ncf_required": True
}
def _get_journal_id(self, fiscal_type, shop_id, refund):
if refund:
return shop_id.notas_credito_id.id
elif fiscal_type == "final" or fiscal_type is None:
return shop_id.final_id.id
elif fiscal_type == "fiscal":
return shop_id.fiscal_id.id
elif fiscal_type == "special":
return shop_id.especiales_id.id
elif fiscal_type == "gov":
return shop_id.gubernamentales_id.id
else:
return False
def create(self, cr, uid, vals, context=None):
if not context:
context = {}
if context.get('active_model', False) == 'pos.order' and vals.get('type', False) in ["out_invoice", 'out_refund']:
pass
elif context.get('active_model', False) == 'stock.picking.in' and vals.get('type', False) == "out_refund":
pass
elif vals.get('type', False) == "out_invoice":
order_obj = self.pool.get('sale.order')
so_id = order_obj.search(cr, uid, [('name', '=', vals['origin'])])
so = order_obj.browse(cr, uid, so_id, context)[0]
if not vals['fiscal_position']:
vals['fiscal_position'] = 2
fiscal_type = so.partner_id.property_account_position.fiscal_type or 'final'
vals['journal_id'] = self._get_journal_id(fiscal_type, so.shop_id, False)
elif vals.get('type', False) == "out_refund":
if vals.get('origin', False):
order_obj = self.pool.get('sale.order')
so_id = order_obj.search(cr, uid, [('name', '=', vals.get('origin', None))])
so = order_obj.browse(cr, uid, so_id, context)[0]
if not vals['fiscal_position']:
vals['fiscal_position'] = 2
vals['journal_id'] = self._get_journal_id(None, so.shop_id, True)
else:
vals['reference'] = u""
inv_obj = self.pool.get('account.invoice')
origin = inv_obj.read(cr, uid, context['active_id'], ['number'])
vals['origin'] = origin["number"]
elif vals.get('type', False) == "in_invoice" and vals.get('fiscal_position', False):
fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, vals['fiscal_position']).fiscal_type
vals['reference_type'] = fiscal_type
elif vals.get('type', False) == "in_refund" and vals.get('fiscal_position', False):
vals['reference'] = vals.get('origin', "")
fiscal_type = self.pool.get("account.fiscal.position").browse(cr, uid, vals['fiscal_position']).fiscal_type
vals['reference_type'] = fiscal_type
inv = super(account_invoice, self).create(cr, uid, vals, context)
return inv
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state': 'draft', 'internal_number': False})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
def _refund_cleanup_lines(self, cr, uid, lines, context=None):
"""
For each invoice line.
If amount of days since invoice is greater than 30.
For each tax on each invoice line.
If the tax is included in the price.
The tax is replaced with the corresponding tax exempt tax.
If tax is not include in price, no tax will show up in the refund.
"""
result = super(account_invoice, self)._refund_cleanup_lines(cr, uid, lines, context=context)
# For each invoice_line
for x, y, line in result:
inv_obj = self.pool.get('account.invoice').browse(cr, uid, line['invoice_id'], context=context)
inv_date = datetime.strptime(inv_obj['date_invoice'], "%Y-%m-%d").date()
days_diff = datetime.today().date() - inv_date
# If amount of days since invoice is greater than 30:
if days_diff.days > 30:
taxes_ids = []
# For each tax on the invoice line:
for tax_id in line['invoice_line_tax_id'][0][2]:
tax_original = self.pool.get('account.tax').browse(cr, uid, tax_id, context=context)
# If the tax is included in the price:
if tax_original.price_include:
# Replace it with the corresponding tax exempt tax.
tax_replacement = self.pool.get('account.tax').search(cr, uid,
[('type_tax_use', '=', tax_original.type_tax_use),
('amount', '=', tax_original.amount),
('exempt', '=', True),
],
context=context)[0]
# No duplicate taxes allowed
if tax_replacement not in taxes_ids:
taxes_ids.append(tax_replacement)
# If tax is not include in price, no tax will show up in the refund.
line['invoice_line_tax_id'] = [(6, 0, taxes_ids)]
return result
def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
if invoice_browse.pay_to:
supplier_account_id = invoice_browse.partner_id.property_account_payable.id
for line in [lines[2] for lines in move_lines]:
if line.get("account_id", False) == supplier_account_id:
line.update({'partner_id': invoice_browse.pay_to.id, 'account_id': invoice_browse.pay_to.property_account_payable.id})
return move_lines
| eneldoserrata/marcos_openerp | marcos_addons/marcos_ncf/account_invoice.py | Python | agpl-3.0 | 10,032 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('backend', '0067_auto_20160411_1224'),
]
operations = [
migrations.CreateModel(
name='Brick',
fields=[
('crafteditem_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='backend.CraftedItem')),
],
bases=('backend.crafteditem',),
),
migrations.AddField(
model_name='crafteditem',
name='energy',
field=models.IntegerField(default=0),
),
]
| TudorRosca/enklave | server/backend/migrations/0068_auto_20160413_0650.py | Python | agpl-3.0 | 710 |
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import UserError
class AccountMove(models.Model):
_inherit = "account.move"
pricelist_id = fields.Many2one(
comodel_name="product.pricelist",
string="Pricelist",
readonly=True,
states={"draft": [("readonly", False)]},
)
@api.constrains("pricelist_id", "currency_id")
def _check_currency(self):
for sel in self.filtered(lambda a: a.pricelist_id and a.is_invoice()):
if sel.pricelist_id.currency_id != sel.currency_id:
raise UserError(
_("Pricelist and Invoice need to use the same currency.")
)
@api.onchange("partner_id", "company_id")
def _onchange_partner_id_account_invoice_pricelist(self):
if self.is_invoice():
if (
self.partner_id
and self.move_type in ("out_invoice", "out_refund")
and self.partner_id.property_product_pricelist
):
self.pricelist_id = self.partner_id.property_product_pricelist
self._set_pricelist_currency()
@api.onchange("pricelist_id")
def _set_pricelist_currency(self):
if (
self.is_invoice()
and self.pricelist_id
and self.currency_id != self.pricelist_id.currency_id
):
self.currency_id = self.pricelist_id.currency_id
def button_update_prices_from_pricelist(self):
for inv in self.filtered(lambda r: r.state == "draft"):
inv.invoice_line_ids._onchange_product_id_account_invoice_pricelist()
self.filtered(lambda r: r.state == "draft").with_context(
check_move_validity=False
)._move_autocomplete_invoice_lines_values()
self.filtered(lambda r: r.state == "draft").with_context(
check_move_validity=False
)._recompute_tax_lines()
def _reverse_move_vals(self, default_values, cancel=True):
move_vals = super(AccountMove, self)._reverse_move_vals(
default_values, cancel=cancel
)
if self.pricelist_id:
move_vals["pricelist_id"] = self.pricelist_id.id
return move_vals
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
@api.onchange("product_id", "quantity")
def _onchange_product_id_account_invoice_pricelist(self):
for sel in self:
if not sel.move_id.pricelist_id:
return
sel.with_context(check_move_validity=False).update(
{"price_unit": sel._get_price_with_pricelist()}
)
@api.onchange("product_uom_id")
def _onchange_uom_id(self):
for sel in self:
if (
sel.move_id.is_invoice()
and sel.move_id.state == "draft"
and sel.move_id.pricelist_id
):
price_unit = sel._get_computed_price_unit()
taxes = sel._get_computed_taxes()
if taxes and sel.move_id.fiscal_position_id:
price_subtotal = sel._get_price_total_and_subtotal(
price_unit=price_unit, taxes=taxes
)["price_subtotal"]
accounting_vals = sel._get_fields_onchange_subtotal(
price_subtotal=price_subtotal,
currency=self.move_id.company_currency_id,
)
amount_currency = accounting_vals["amount_currency"]
price_unit = sel._get_fields_onchange_balance(
amount_currency=amount_currency
).get("price_unit", price_unit)
sel.with_context(check_move_validity=False).update(
{"price_unit": price_unit}
)
else:
super(AccountMoveLine, self)._onchange_uom_id()
def _get_real_price_currency(self, product, rule_id, qty, uom, pricelist_id):
PricelistItem = self.env["product.pricelist.item"]
field_name = "lst_price"
currency_id = None
product_currency = product.currency_id
if rule_id:
pricelist_item = PricelistItem.browse(rule_id)
while (
pricelist_item.base == "pricelist"
and pricelist_item.base_pricelist_id
and pricelist_item.base_pricelist_id.discount_policy
== "without_discount"
):
price, rule_id = pricelist_item.base_pricelist_id.with_context(
uom=uom.id
).get_product_price_rule(product, qty, self.move_id.partner_id)
pricelist_item = PricelistItem.browse(rule_id)
if pricelist_item.base == "standard_price":
field_name = "standard_price"
product_currency = product.cost_currency_id
elif (
pricelist_item.base == "pricelist" and pricelist_item.base_pricelist_id
):
field_name = "price"
product = product.with_context(
pricelist=pricelist_item.base_pricelist_id.id
)
product_currency = pricelist_item.base_pricelist_id.currency_id
currency_id = pricelist_item.pricelist_id.currency_id
if not currency_id:
currency_id = product_currency
cur_factor = 1.0
else:
if currency_id.id == product_currency.id:
cur_factor = 1.0
else:
cur_factor = currency_id._get_conversion_rate(
product_currency,
currency_id,
self.company_id or self.env.company,
self.move_id.invoice_date or fields.Date.today(),
)
product_uom = self.env.context.get("uom") or product.uom_id.id
if uom and uom.id != product_uom:
uom_factor = uom._compute_price(1.0, product.uom_id)
else:
uom_factor = 1.0
return product[field_name] * uom_factor * cur_factor, currency_id
def _calculate_discount(self, base_price, final_price):
if not base_price:
return 0.0
discount = (base_price - final_price) / base_price * 100
if (discount < 0 and base_price > 0) or (discount > 0 and base_price < 0):
discount = 0.0
return discount
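# worked example for _calculate_discount (illustrative figures): with a
# pricelist base price of 100.0 and a final rule price of 80.0 the stored
# discount is (100.0 - 80.0) / 100.0 * 100 = 20.0, and the line keeps
# price_unit = max(100.0, 80.0) = 100.0, so the customer still sees the
# list price together with an explicit 20% discount.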
def _get_price_with_pricelist(self):
price_unit = 0.0
if self.move_id.pricelist_id and self.product_id and self.move_id.is_invoice():
if self.move_id.pricelist_id.discount_policy == "with_discount":
product = self.product_id.with_context(
lang=self.move_id.partner_id.lang,
partner=self.move_id.partner_id.id,
quantity=self.quantity,
date_order=self.move_id.invoice_date,
date=self.move_id.invoice_date,
pricelist=self.move_id.pricelist_id.id,
product_uom_id=self.product_uom_id.id,
fiscal_position=(
self.move_id.partner_id.property_account_position_id.id
),
)
tax_obj = self.env["account.tax"]
recalculated_price_unit = (
product.price * self.product_id.uom_id.factor
) / (self.product_uom_id.factor or 1.0)
price_unit = tax_obj._fix_tax_included_price_company(
recalculated_price_unit,
product.taxes_id,
self.tax_ids,
self.company_id,
)
self.with_context(check_move_validity=False).discount = 0.0
else:
product_context = dict(
self.env.context,
partner_id=self.move_id.partner_id.id,
date=self.move_id.invoice_date or fields.Date.today(),
uom=self.product_uom_id.id,
)
final_price, rule_id = self.move_id.pricelist_id.with_context(
product_context
).get_product_price_rule(
self.product_id, self.quantity or 1.0, self.move_id.partner_id
)
base_price, currency = self.with_context(
product_context
)._get_real_price_currency(
self.product_id,
rule_id,
self.quantity,
self.product_uom_id,
self.move_id.pricelist_id.id,
)
if currency != self.move_id.pricelist_id.currency_id:
base_price = currency._convert(
base_price,
self.move_id.pricelist_id.currency_id,
self.move_id.company_id or self.env.company,
self.move_id.invoice_date or fields.Date.today(),
)
price_unit = max(base_price, final_price)
self.with_context(
check_move_validity=False
).discount = self._calculate_discount(base_price, final_price)
return price_unit
def _get_computed_price_unit(self):
price_unit = super(AccountMoveLine, self)._get_computed_price_unit()
if self.move_id.pricelist_id and self.move_id.is_invoice():
price_unit = self._get_price_with_pricelist()
return price_unit
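# A minimal usage sketch (hypothetical records): switch a draft invoice to
# another pricelist, then recompute its line prices and taxes:
#
#   move = env['account.move'].browse(draft_move_id)
#   move.pricelist_id = env.ref('product.list0')  # assumed pricelist record
#   move.button_update_prices_from_pricelist()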
| OCA/account-invoicing | account_invoice_pricelist/models/account_move.py | Python | agpl-3.0 | 9,635 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from openerp import models, fields, api
_logger = logging.getLogger(__name__)
class DocumentPage(models.Model):
"""This class is use to manage Document."""
_name = "document.page"
_inherit = ['mail.thread']
_description = "Document Page"
_order = 'name'
name = fields.Char('Title', required=True)
type = fields.Selection(
[('content', 'Content'), ('category', 'Category')],
'Type',
help="Page type",
default="content"
)
parent_id = fields.Many2one(
'document.page',
'Category',
domain=[('type', '=', 'category')]
)
child_ids = fields.One2many(
'document.page',
'parent_id',
'Children'
)
content = fields.Text(
"Content"
)
display_content = fields.Text(
string='Displayed Content',
compute='_get_display_content'
)
history_ids = fields.One2many(
'document.page.history',
'page_id',
'History'
)
menu_id = fields.Many2one(
'ir.ui.menu',
"Menu",
readonly=True
)
create_date = fields.Datetime(
"Created on",
index=True,
readonly=True
)
create_uid = fields.Many2one(
'res.users',
'Author',
index=True,
readonly=True
)
write_date = fields.Datetime(
"Modification Date",
index=True,
readonly=True)
write_uid = fields.Many2one(
'res.users',
"Last Contributor",
index=True,
readonly=True
)
def _get_page_index(self, page, link=True):
"""Return the index of a document."""
index = []
for subpage in page.child_ids:
index += ["<li>" + self._get_page_index(subpage) +
"</li>"]
r = ''
if link:
r = '<a href="#id=%s">%s</a>' % (page.id, page.name)
if index:
r += "<ul>" + "".join(index) + "</ul>"
return r
def _get_display_content(self):
"""Return the content of a document."""
for page in self:
if page.type == "category":
display_content = self._get_page_index(page, link=False)
else:
display_content = page.content
page.display_content = display_content
@api.onchange("parent_id")
def do_set_content(self):
"""We Set it the right content to the new parent."""
if self.parent_id and not self.content:
if self.parent_id.type == "category":
self.content = self.parent_id.content
def create_history(self, page_id, content):
"""Create the first history of a newly created document."""
history = self.env['document.page.history']
return history.create({
"content": content,
"page_id": page_id
})
@api.multi
def write(self, vals):
"""Write the content and set the history."""
result = super(DocumentPage, self).write(vals)
content = vals.get('content')
if content:
for page in self:
self.create_history(page.id, content)
return result
@api.model
@api.returns('self', lambda value: value.id)
def create(self, vals):
"""Create the first history of a document."""
page_id = super(DocumentPage, self).create(vals)
content = vals.get('content')
if content:
self.create_history(page_id.id, content)
return page_id
| ClearCorp/knowledge | document_page/models/document_page.py | Python | agpl-3.0 | 4,535 |
from comics.aggregator.crawler import CrawlerBase, CrawlerImage
from comics.core.comic_data import ComicDataBase
class ComicData(ComicDataBase):
name = "Lunch (e24.no)"
language = "no"
url = "http://www.e24.no/lunch/"
start_date = "2009-10-21"
rights = "Børge Lund"
class Crawler(CrawlerBase):
history_capable_date = "2012-11-02"
schedule = "Mo,Tu,We,Th,Fr,Sa"
time_zone = "Europe/Oslo"
def crawl(self, pub_date):
url = "http://static.e24.no/images/comics/lunch_%s.gif" % (
pub_date.strftime("%Y%m%d")
)
return CrawlerImage(url)
| jodal/comics | comics/comics/lunche24.py | Python | agpl-3.0 | 607 |
"""
Views related to operations on course objects
"""
import copy
import json
import logging
import random
import string # pylint: disable=deprecated-module
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, Http404
from django.shortcuts import redirect
import django.utils
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods, require_GET
from django.views.decorators.csrf import ensure_csrf_cookie
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import Location
from .component import (
ADVANCED_COMPONENT_TYPES,
SPLIT_TEST_COMPONENT_TYPE,
)
from .item import create_xblock_info
from .library import LIBRARIES_ENABLED
from contentstore import utils
from contentstore.course_group_config import (
COHORT_SCHEME,
GroupConfiguration,
GroupConfigurationsValidationError,
RANDOM_SCHEME,
)
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.push_notification import push_notification_enabled
from contentstore.tasks import rerun_course
from contentstore.utils import (
add_instructor,
initialize_permissions,
get_lms_link_for_item,
remove_all_instructors,
reverse_course_url,
reverse_library_url,
reverse_usage_url,
reverse_url,
)
from contentstore.views.entrance_exam import (
create_entrance_exam,
delete_entrance_exam,
update_entrance_exam,
)
from course_action_state.managers import CourseActionStateItemNotFoundError
from course_action_state.models import CourseRerunState, CourseRerunUIStateManager
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from edxmako.shortcuts import render_to_response
from microsite_configuration import microsite
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from models.settings.encoder import CourseSettingsEncoder
from openedx.core.djangoapps.content.course_structures.api.v0 import api, errors
from openedx.core.djangoapps.credit.api import is_credit_course, get_credit_requirements
from openedx.core.djangoapps.credit.tasks import update_credit_course_requirements
from openedx.core.djangoapps.ga_optional.api import is_available
from openedx.core.djangoapps.ga_optional.models import (
CUSTOM_LOGO_OPTION_KEY,
LIBRARY_OPTION_KEY,
PROGRESS_RESTRICTION_OPTION_KEY
)
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.utils import get_programs
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.lib.course_tabs import CourseTabPluginManager
from openedx.core.lib.courses import course_image_url, custom_logo_url
from openedx.core.lib.ga_course_utils import is_using_jwplayer_course
from openedx.core.lib.js_utils import escape_json_dumps
from student import auth
from student.auth import has_course_author_access, has_studio_write_access, has_studio_read_access
from student.models import CourseAccessRole
from student.roles import (
CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GaGlobalCourseCreatorRole, GlobalStaff, UserBasedRole,
)
from util.date_utils import get_default_time_display
from util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from util.milestones_helpers import (
is_entrance_exams_enabled,
is_prerequisite_courses_enabled,
is_valid_course_key,
set_prerequisite_courses,
)
from ga_maintenance_cms.models import MaintenanceMessage
from util.organizations_helpers import (
add_organization_course,
get_organization_by_short_name,
organizations_enabled,
)
from util.string_utils import _has_non_ascii_characters
from xmodule.contentstore.content import StaticContent
from xmodule.course_module import CourseFields
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError
from xmodule.tabs import CourseTab, CourseTabList, InvalidTabsException
log = logging.getLogger(__name__)
__all__ = ['course_info_handler', 'course_handler', 'course_listing',
'library_listing',
'course_info_update_handler', 'course_search_index_handler',
'course_rerun_handler',
'settings_handler',
'grading_handler',
'advanced_settings_handler',
'course_notifications_handler',
'textbooks_list_handler', 'textbooks_detail_handler',
'group_configurations_list_handler', 'group_configurations_detail_handler']
class AccessListFallback(Exception):
"""
An exception that is raised whenever we need to `fall back` to fetching *all* courses
available to a user, rather than using a shorter method (i.e. fetching by group)
"""
pass
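# A typical consumption pattern for this fallback (sketch; the actual
# get_courses_accessible_to_user helper is defined further down in this
# module):
#
#   try:
#       courses, in_process = _accessible_courses_list_from_groups(request)
#   except AccessListFallback:
#       courses, in_process = _accessible_courses_list(request)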
def get_course_and_check_access(course_key, user, depth=0):
"""
Internal method used to calculate and return the locator and course module
for the view functions in this file.
"""
if not has_studio_read_access(user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key, depth=depth)
return course_module
def reindex_course_and_check_access(course_key, user):
"""
Internal method used to restart indexing on a course.
"""
if not has_course_author_access(user, course_key):
raise PermissionDenied()
return CoursewareSearchIndexer.do_course_reindex(modulestore(), course_key)
@login_required
def course_notifications_handler(request, course_key_string=None, action_state_id=None):
"""
Handle incoming requests for notifications in a RESTful way.
course_key_string and action_state_id must both be set; else a HttpBadResponseRequest is returned.
For each of these operations, the requesting user must have access to the course;
else a PermissionDenied error is returned.
GET
json: return json representing information about the notification (action, state, etc)
DELETE
json: return json repressing success or failure of dismissal/deletion of the notification
PUT
Raises a NotImplementedError.
POST
Raises a NotImplementedError.
"""
# ensure that we have a course and an action state
if not course_key_string or not action_state_id:
return HttpResponseBadRequest()
response_format = request.REQUEST.get('format', 'html')
course_key = CourseKey.from_string(course_key_string)
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if not has_studio_write_access(request.user, course_key):
raise PermissionDenied()
if request.method == 'GET':
return _course_notifications_json_get(action_state_id)
elif request.method == 'DELETE':
# we assume any delete requests dismiss actions from the UI
return _dismiss_notification(request, action_state_id)
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'POST':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
else:
return HttpResponseNotFound()
def _course_notifications_json_get(course_action_state_id):
"""
Return the action and the action state for the given id
"""
try:
action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
except CourseActionStateItemNotFoundError:
return HttpResponseBadRequest()
action_state_info = {
'action': action_state.action,
'state': action_state.state,
'should_display': action_state.should_display
}
return JsonResponse(action_state_info)
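# An example payload returned above (illustrative values):
#   {"action": "rerun", "state": "in_progress", "should_display": true}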
def _dismiss_notification(request, course_action_state_id): # pylint: disable=unused-argument
"""
Update the display of the course notification
"""
try:
action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
except CourseActionStateItemNotFoundError:
# Can't dismiss a notification that doesn't exist in the first place
return HttpResponseBadRequest()
if action_state.state == CourseRerunUIStateManager.State.FAILED:
# We remove all permissions for this course key at this time, since
# no further access is required to a course that failed to be created.
remove_all_instructors(action_state.course_key)
# The CourseRerunState is no longer needed by the UI; delete
action_state.delete()
return JsonResponse({'success': True})
# pylint: disable=unused-argument
@login_required
def course_handler(request, course_key_string=None):
"""
The restful handler for course specific requests.
It provides the course tree with the necessary information for identifying and labeling the parts. The root
will typically be a 'course' object but may not be especially as we support modules.
GET
html: return course listing page if not given a course id
html: return html page overview for the given course if given a course id
json: return json representing the course branch's index entry as well as dag w/ all of the children
replaced w/ json docs where each doc has {'_id': , 'display_name': , 'children': }
POST
json: create a course, return resulting json
descriptor (same as in GET course/...). Leaving off /branch/draft would imply create the course w/ default
branches. Cannot change the structure contents ('_id', 'display_name', 'children') but can change the
index entry.
PUT
json: update this course (index entry not xblock) such as repointing head, changing display name, org,
course, run. Return same json as above.
DELETE
json: delete this branch from this course (leaving off /branch/draft would imply delete the course)
"""
try:
response_format = request.REQUEST.get('format', 'html')
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=None)
return JsonResponse(_course_outline_json(request, course_module))
elif request.method == 'POST':  # not sure if this should only be POST; if requests carry ids, this branch belongs after the access check
return _create_or_rerun_course(request)
elif not has_studio_write_access(request.user, CourseKey.from_string(course_key_string)):
raise PermissionDenied()
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'DELETE':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
elif request.method == 'GET': # assume html
if course_key_string is None:
return redirect(reverse("home"))
else:
return course_index(request, CourseKey.from_string(course_key_string))
else:
return HttpResponseNotFound()
except InvalidKeyError:
raise Http404
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_rerun_handler(request, course_key_string):
"""
The restful handler for course reruns.
GET
html: return html page with form to rerun a course for the given course id
"""
# Only global staff (PMs) and GaGlobalCourseCreator are able to rerun courses during the soft launch
# Note: GaGlobalCourseCreator has access to rerun (#2150)
if not GlobalStaff().has_user(request.user) and not GaGlobalCourseCreatorRole().has_user(request.user):
raise PermissionDenied()
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=3)
if request.method == 'GET':
return render_to_response('course-create-rerun.html', {
'source_course_key': course_key,
'display_name': course_module.display_name,
'user': request.user,
'course_creator_status': _get_course_creator_status(request.user),
'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False)
})
@login_required
@ensure_csrf_cookie
@require_GET
def course_search_index_handler(request, course_key_string):
"""
The restful handler for course indexing.
GET
html: return status of indexing task
json: return status of indexing task
"""
# Only global staff (PMs) and GaGlobalCourseCreator are able to index courses
# Note: GaGlobalCourseCreator has access to course search index (#2150)
if not GlobalStaff().has_user(request.user) and not GaGlobalCourseCreatorRole().has_user(request.user):
raise PermissionDenied()
course_key = CourseKey.from_string(course_key_string)
content_type = request.META.get('CONTENT_TYPE', None)
if content_type is None:
content_type = "application/json; charset=utf-8"
with modulestore().bulk_operations(course_key):
try:
reindex_course_and_check_access(course_key, request.user)
except SearchIndexingError as search_err:
return HttpResponse(escape_json_dumps({
"user_message": search_err.error_list
}), content_type=content_type, status=500)
return HttpResponse(escape_json_dumps({
"user_message": _("Course has been successfully reindexed.")
}), content_type=content_type, status=200)
def _course_outline_json(request, course_module):
"""
Returns a JSON representation of the course module and recursively all of its children.
"""
return create_xblock_info(
course_module,
include_child_info=True,
course_outline=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical',
user=request.user
)
def _accessible_courses_list(request):
"""
List all courses available to the logged in user by iterating through all the courses
"""
def course_filter(course):
"""
Filter out unusable and inaccessible courses
"""
if isinstance(course, ErrorDescriptor):
return False
# pylint: disable=fixme
# TODO remove this condition when templates purged from db
if course.location.course == 'templates':
return False
return has_studio_read_access(request.user, course.id)
courses = filter(course_filter, modulestore().get_courses())
in_process_course_actions = [
course for course in
CourseRerunState.objects.find_all(
exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED}, should_display=True
)
if has_studio_read_access(request.user, course.course_key)
]
return courses, in_process_course_actions
def _accessible_courses_list_from_groups(request):
"""
List all courses available to the logged in user by reversing access group names
"""
courses_list = {}
in_process_course_actions = []
instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()
staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()
all_courses = instructor_courses | staff_courses
for course_access in all_courses:
course_key = course_access.course_id
if course_key is None:
# If the course_access does not have a course_id, it's an org-based role, so we fall back
raise AccessListFallback
if course_key not in courses_list:
# check for any course action state for this course
in_process_course_actions.extend(
CourseRerunState.objects.find_all(
exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED},
should_display=True,
course_key=course_key,
)
)
# check for the course itself
try:
course = modulestore().get_course(course_key)
except ItemNotFoundError:
# If a user has access to a course that doesn't exist, don't do anything with that course
pass
if course is not None and not isinstance(course, ErrorDescriptor):
# ignore deleted or errored courses
courses_list[course_key] = course
return courses_list.values(), in_process_course_actions
def _accessible_libraries_list(user):
"""
List all libraries available to the logged in user by iterating through all libraries
"""
# No need to worry about ErrorDescriptors - split's get_libraries() never returns them.
return [lib for lib in modulestore().get_libraries() if has_studio_read_access(user, lib.location.library_key)]
@login_required
@ensure_csrf_cookie
def course_listing(request):
"""
List all courses available to the logged in user
"""
courses, in_process_course_actions = get_courses_accessible_to_user(request)
libraries = _accessible_libraries_list(request.user) if LIBRARIES_ENABLED else []
programs_config = ProgramsApiConfig.current()
raw_programs = get_programs(request.user) if programs_config.is_studio_tab_enabled else []
# Sort programs alphabetically by name.
# TODO: Support ordering in the Programs API itself.
programs = sorted(raw_programs, key=lambda p: p['name'].lower())
def format_in_process_course_view(uca):
"""
Return a dict of the data which the view requires for each course action that has not succeeded
"""
return {
'display_name': uca.display_name,
'course_key': unicode(uca.course_key),
'org': uca.course_key.org,
'number': uca.course_key.course,
'run': uca.course_key.run,
'is_failed': uca.state == CourseRerunUIStateManager.State.FAILED,
'is_in_progress': uca.state == CourseRerunUIStateManager.State.IN_PROGRESS,
'dismiss_link': reverse_course_url(
'course_notifications_handler',
uca.course_key,
kwargs={
'action_state_id': uca.id,
},
) if uca.state == CourseRerunUIStateManager.State.FAILED else ''
}
def format_library_for_view(library):
"""
Return a dict of the data which the view requires for each library
"""
return {
'display_name': library.display_name,
'library_key': unicode(library.location.library_key),
'url': reverse_library_url('library_handler', unicode(library.location.library_key)),
'org': library.display_org_with_default,
'number': library.display_number_with_default,
'can_edit': has_studio_write_access(request.user, library.location.library_key),
}
courses = _remove_in_process_courses(courses, in_process_course_actions)
in_process_course_actions = [format_in_process_course_view(uca) for uca in in_process_course_actions]
maintenance_message = MaintenanceMessage.messages_for_all()
return render_to_response('index.html', {
'courses': courses,
'in_process_course_actions': in_process_course_actions,
'libraries_enabled': LIBRARIES_ENABLED,
'libraries': [format_library_for_view(lib) for lib in libraries],
'show_new_library_button': LIBRARIES_ENABLED and request.user.is_active,
'user': request.user,
'request_course_creator_url': reverse('contentstore.views.request_course_creator'),
'course_creator_status': _get_course_creator_status(request.user),
# Note: GaGlobalCourseCreator can see the rerun status (#2150)
'rerun_creator_status': GlobalStaff().has_user(request.user) or GaGlobalCourseCreatorRole().has_user(request.user),
'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False),
'allow_course_reruns': settings.FEATURES.get('ALLOW_COURSE_RERUNS', True),
'maintenance_message': maintenance_message,
# Note: GaGlobalCourseCreator has same authority a global staff in studio (#2150)
'is_programs_enabled': programs_config.is_studio_tab_enabled and (
request.user.is_staff or GaGlobalCourseCreatorRole().has_user(request.user)),
'programs': programs,
'program_authoring_url': reverse('programs'),
})
@login_required
@ensure_csrf_cookie
def library_listing(request, course_key_string=None):
"""
List all libraries for the course
"""
def format_library_for_view(library, course):
"""
Return a dict of the data which the view requires for each library
"""
return {
'display_name': library.display_name,
'library_key': unicode(library.location.library_key),
'url': reverse_course_url('course_library_handler', course, kwargs={'library_key_string': unicode(library.location.library_key)}),
'org': library.display_org_with_default,
'number': library.display_number_with_default,
'can_edit': has_studio_write_access(request.user, library.location.library_key),
}
maintenance_message = MaintenanceMessage.messages_for_all()
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=None)
instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()
global_staff = GlobalStaff().has_user(request.user)
# Note: GaGlobalCourseCreator has access to library listing (#2150)
ga_global_course_creator = GaGlobalCourseCreatorRole().has_user(request.user)
if not course_module or not is_available(LIBRARY_OPTION_KEY, course_key):
raise Http404
if not instructor_courses and not global_staff and not ga_global_course_creator:
raise Http404
target_libraries = course_module.target_library
libraries = [
library for library in modulestore().get_libraries()
if unicode(library.location.library_key) in target_libraries
]
return render_to_response('index_lib.html', {
'context_course': course_module,
'libraries_enabled': LIBRARIES_ENABLED,
'libraries': [format_library_for_view(lib, course_key) for lib in libraries],
'show_new_library_button': LIBRARIES_ENABLED and request.user.is_active,
'library_option': is_available(LIBRARY_OPTION_KEY, course_key),
'user': request.user,
'maintenance_message': maintenance_message,
'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False)
})
def _get_rerun_link_for_item(course_key):
""" Returns the rerun link for the given course key. """
return reverse_course_url('course_rerun_handler', course_key)
def _deprecated_blocks_info(course_module, deprecated_block_types):
"""
Returns deprecation information about `deprecated_block_types`
Arguments:
course_module (CourseDescriptor): course object
deprecated_block_types (list): list of deprecated blocks types
Returns:
Dict with following keys:
block_types (list): list containing types of all deprecated blocks
        block_types_enabled (bool): True if any of the `deprecated_block_types` are present in the Advanced Module List, else False
blocks (list): List of `deprecated_block_types` component names and their parent's url
advance_settings_url (str): URL to advance settings page
"""
data = {
'block_types': deprecated_block_types,
'block_types_enabled': any(
block_type in course_module.advanced_modules for block_type in deprecated_block_types
),
'blocks': [],
'advance_settings_url': reverse_course_url('advanced_settings_handler', course_module.id)
}
try:
structure_data = api.course_structure(course_module.id, block_types=deprecated_block_types)
except errors.CourseStructureNotAvailableError:
return data
blocks = []
for block in structure_data['blocks'].values():
blocks.append([reverse_usage_url('container_handler', block['parent']), block['display_name']])
data['blocks'].extend(blocks)
return data
@login_required
@ensure_csrf_cookie
def course_index(request, course_key):
"""
Display an editable course overview.
org, course, name: Attributes of the Location for the item to edit
"""
# A depth of None implies the whole course. The course outline needs this in order to compute has_changes.
# A unit may not have a draft version, but one of its components could, and hence the unit itself has changes.
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=None)
if not course_module:
raise Http404
lms_link = get_lms_link_for_item(course_module.location)
reindex_link = None
if settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False):
reindex_link = "/course/{course_id}/search_reindex".format(course_id=unicode(course_key))
sections = course_module.get_children()
course_structure = _course_outline_json(request, course_module)
locator_to_show = request.REQUEST.get('show', None)
course_release_date = get_default_time_display(course_module.start) if course_module.start != DEFAULT_START_DATE else _("Unscheduled")
settings_url = reverse_course_url('settings_handler', course_key)
try:
current_action = CourseRerunState.objects.find_first(course_key=course_key, should_display=True)
except (ItemNotFoundError, CourseActionStateItemNotFoundError):
current_action = None
deprecated_blocks_info = _deprecated_blocks_info(course_module, settings.DEPRECATED_BLOCK_TYPES)
return render_to_response('course_outline.html', {
'context_course': course_module,
'lms_link': lms_link,
'sections': sections,
'course_structure': course_structure,
'initial_state': course_outline_initial_state(locator_to_show, course_structure) if locator_to_show else None,
'rerun_notification_id': current_action.id if current_action else None,
'course_release_date': course_release_date,
'settings_url': settings_url,
'reindex_link': reindex_link,
'deprecated_blocks_info': deprecated_blocks_info,
'notification_dismiss_url': reverse_course_url(
'course_notifications_handler',
current_action.course_key,
kwargs={
'action_state_id': current_action.id,
},
) if current_action else None,
'library_option': is_available(LIBRARY_OPTION_KEY, course_key),
'is_restricted_in_progress': is_available(PROGRESS_RESTRICTION_OPTION_KEY, course_key),
})
def get_courses_accessible_to_user(request):
"""
    Try to get all courses from the user's django groups first, falling back to the old method if that fails
Note: overhead of pymongo reads will increase if getting courses from django groups fails
"""
# Note: GaGlobalCourseCreator has access to courses (#2150)
if GlobalStaff().has_user(request.user) or GaGlobalCourseCreatorRole().has_user(request.user):
# user has global access so no need to get courses from django groups
courses, in_process_course_actions = _accessible_courses_list(request)
else:
try:
courses, in_process_course_actions = _accessible_courses_list_from_groups(request)
except AccessListFallback:
            # the user has some old groups or there was an error getting courses from django groups,
            # so fall back to iterating through all courses
courses, in_process_course_actions = _accessible_courses_list(request)
return courses, in_process_course_actions
def _remove_in_process_courses(courses, in_process_course_actions):
"""
    Removes any in-process courses from the courses list. "In-process" refers to courses
    that are in the process of being generated for a re-run.
"""
def format_course_for_view(course):
"""
Return a dict of the data which the view requires for each course
"""
return {
'display_name': course.display_name,
'course_key': unicode(course.location.course_key),
'url': reverse_course_url('course_handler', course.id),
'lms_link': get_lms_link_for_item(course.location),
'rerun_link': _get_rerun_link_for_item(course.id),
'org': course.display_org_with_default,
'number': course.display_number_with_default,
'run': course.location.run
}
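    # Course keys with a pending rerun action; such courses are dropped from the listing below.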
in_process_action_course_keys = [uca.course_key for uca in in_process_course_actions]
courses = [
format_course_for_view(c)
for c in courses
if not isinstance(c, ErrorDescriptor) and (c.id not in in_process_action_course_keys)
]
return courses
def course_outline_initial_state(locator_to_show, course_structure):
"""
Returns the desired initial state for the course outline view. If the 'show' request parameter
was provided, then the view's initial state will be to have the desired item fully expanded
and to scroll to see the new item.
"""
def find_xblock_info(xblock_info, locator):
"""
Finds the xblock info for the specified locator.
"""
if xblock_info['id'] == locator:
return xblock_info
children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
if children:
for child_xblock_info in children:
result = find_xblock_info(child_xblock_info, locator)
if result:
return result
return None
def collect_all_locators(locators, xblock_info):
"""
Collect all the locators for an xblock and its children.
"""
locators.append(xblock_info['id'])
children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
if children:
for child_xblock_info in children:
collect_all_locators(locators, child_xblock_info)
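    # Find the requested block in the outline, then expand it together with all of its descendants.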
selected_xblock_info = find_xblock_info(course_structure, locator_to_show)
if not selected_xblock_info:
return None
expanded_locators = []
collect_all_locators(expanded_locators, selected_xblock_info)
return {
'locator_to_show': locator_to_show,
'expanded_locators': expanded_locators
}
@expect_json
def _create_or_rerun_course(request):
"""
To be called by requests that create a new destination course (i.e., create_new_course and rerun_course)
Returns the destination course_key and overriding fields for the new course.
Raises DuplicateCourseError and InvalidKeyError
"""
# Note: GaGlobalCourseCreator has access to create or rerun course (#2150)
if not auth.user_has_role(request.user, CourseCreatorRole()) \
and not auth.user_has_role(request.user, GaGlobalCourseCreatorRole()):
raise PermissionDenied()
try:
org = request.json.get('org')
course = request.json.get('number', request.json.get('course'))
display_name = request.json.get('display_name')
# force the start date for reruns and allow us to override start via the client
start = request.json.get('start', CourseFields.start.default)
run = request.json.get('run')
# allow/disable unicode characters in course_id according to settings
if not settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID'):
if _has_non_ascii_characters(org) or _has_non_ascii_characters(course) or _has_non_ascii_characters(run):
return JsonResponse(
{'error': _('Special characters not allowed in organization, course number, and course run.')},
status=400
)
fields = {'start': start}
if display_name is not None:
if len(display_name) > settings.MAX_LENGTH_COURSE_DISPLAY_NAME:
return JsonResponse({
'ErrMsg': _(
                        'The course name must be no longer than {max_length} characters.'
).format(max_length=settings.MAX_LENGTH_COURSE_DISPLAY_NAME),
})
fields['display_name'] = display_name
# Set a unique wiki_slug for newly created courses. To maintain active wiki_slugs for
# existing xml courses this cannot be changed in CourseDescriptor.
# # TODO get rid of defining wiki slug in this org/course/run specific way and reconcile
# w/ xmodule.course_module.CourseDescriptor.__init__
wiki_slug = u"{0}.{1}.{2}".format(org, course, run)
definition_data = {'wiki_slug': wiki_slug}
fields.update(definition_data)
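        # A payload containing source_course_key is a rerun; otherwise this creates a brand-new course.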
if 'source_course_key' in request.json:
return _rerun_course(request, org, course, run, fields)
else:
return _create_new_course(request, org, course, run, fields)
except DuplicateCourseError:
return JsonResponse({
'ErrMsg': _(
'There is already a course defined with the same '
'organization and course number. Please '
'change either organization or course number to be unique.'
),
'OrgErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
'CourseErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
})
except InvalidKeyError as error:
return JsonResponse({
"ErrMsg": _("Unable to create course '{name}'.\n\n{err}").format(name=display_name, err=error.message)}
)
def _create_new_course(request, org, number, run, fields):
"""
Create a new course.
Returns the URL for the course overview page.
Raises DuplicateCourseError if the course already exists
"""
org_data = get_organization_by_short_name(org)
if not org_data and organizations_enabled():
return JsonResponse(
{'error': _('You must link this course to an organization in order to continue. '
                        'The organization you selected does not exist in the system; '
                        'you will need to add it to the system first.')},
status=400
)
store_for_new_course = modulestore().default_modulestore.get_modulestore_type()
new_course = create_new_course_in_store(store_for_new_course, request.user, org, number, run, fields)
add_organization_course(org_data, new_course.id)
return JsonResponse({
'url': reverse_course_url('course_handler', new_course.id),
'course_key': unicode(new_course.id),
})
def create_new_course_in_store(store, user, org, number, run, fields):
"""
Create course in store w/ handling instructor enrollment, permissions, and defaulting the wiki slug.
Separated out b/c command line course creation uses this as well as the web interface.
"""
# Set default language from settings and enable web certs
fields.update({
'language': getattr(settings, 'DEFAULT_COURSE_LANGUAGE', 'en'),
'cert_html_view_enabled': True,
})
with modulestore().default_store(store):
# Creating the course raises DuplicateCourseError if an existing course with this org/name is found
new_course = modulestore().create_course(
org,
number,
run,
user.id,
fields=fields,
)
# Make sure user has instructor and staff access to the new course
add_instructor(new_course.id, user, user)
# Initialize permissions for user in the new course
initialize_permissions(new_course.id, user)
return new_course
def _rerun_course(request, org, number, run, fields):
"""
Reruns an existing course.
Returns the URL for the course listing page.
"""
source_course_key = CourseKey.from_string(request.json.get('source_course_key'))
# verify user has access to the original course
if not has_studio_write_access(request.user, source_course_key):
raise PermissionDenied()
# create destination course key
store = modulestore()
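    # The destination key is always created in the split modulestore, whatever store the source course uses.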
with store.default_store('split'):
destination_course_key = store.make_course_key(org, number, run)
# verify org course and run don't already exist
if store.has_course(destination_course_key, ignore_case=True):
raise DuplicateCourseError(source_course_key, destination_course_key)
# Make sure user has instructor and staff access to the destination course
# so the user can see the updated status for that course
add_instructor(destination_course_key, request.user, request.user)
# Mark the action as initiated
CourseRerunState.objects.initiated(source_course_key, destination_course_key, request.user, fields['display_name'])
# Clear the fields that must be reset for the rerun
fields['advertised_start'] = None
# update target_library
source_course = modulestore().get_course(source_course_key)
target_libraries = source_course.target_library if source_course else None
# Rerun the course as a new celery task
json_fields = json.dumps(fields, cls=EdxJSONEncoder)
rerun_course.delay(unicode(source_course_key), unicode(destination_course_key), request.user.id, json_fields, target_libraries)
# Return course listing page
return JsonResponse({
'url': reverse_url('course_handler'),
'destination_course_key': unicode(destination_course_key)
})
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_info_handler(request, course_key_string):
"""
GET
html: return html for editing the course info handouts and updates.
"""
try:
course_key = CourseKey.from_string(course_key_string)
except InvalidKeyError:
raise Http404
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if not course_module:
raise Http404
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
custom_logo_enabled = is_available(CUSTOM_LOGO_OPTION_KEY, course_key)
return render_to_response(
'course_info.html',
{
'context_course': course_module,
'updates_url': reverse_course_url('course_info_update_handler', course_key),
'handouts_locator': course_key.make_usage_key('course_info', 'handouts'),
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(course_module.id),
'push_notification_enabled': push_notification_enabled(),
'custom_logo_enabled': custom_logo_enabled,
'custom_logo_for_url': custom_logo_url(course_module),
'library_option': is_available(LIBRARY_OPTION_KEY, course_key)
}
)
else:
return HttpResponseBadRequest("Only supports html requests")
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def course_info_update_handler(request, course_key_string, provided_id=None):
"""
restful CRUD operations on course_info updates.
    provided_id should be None for a new update (create) and the update's index otherwise.
GET
json: return the course info update models
POST
json: create an update
PUT or DELETE
json: change an existing update
"""
if 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
return HttpResponseBadRequest("Only supports json requests")
course_key = CourseKey.from_string(course_key_string)
usage_key = course_key.make_usage_key('course_info', 'updates')
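    # Normalize an empty id from the URL to None so the request is treated as a create.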
if provided_id == '':
provided_id = None
# check that logged in user has permissions to this item (GET shouldn't require this level?)
if not has_studio_write_access(request.user, usage_key.course_key):
raise PermissionDenied()
if request.method == 'GET':
course_updates = get_course_updates(usage_key, provided_id, request.user.id)
if isinstance(course_updates, dict) and course_updates.get('error'):
return JsonResponse(course_updates, course_updates.get('status', 400))
else:
return JsonResponse(course_updates)
elif request.method == 'DELETE':
try:
return JsonResponse(delete_course_update(usage_key, request.json, provided_id, request.user))
        except Exception:
return HttpResponseBadRequest(
"Failed to delete",
content_type="text/plain"
)
    # POST and PUT are treated identically; django sometimes rewrites one into the other:
elif request.method in ('POST', 'PUT'):
try:
return JsonResponse(update_course_updates(usage_key, request.json, provided_id, request.user))
        except Exception:
return HttpResponseBadRequest(
"Failed to save",
content_type="text/plain"
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "PUT", "POST"))
@expect_json
def settings_handler(request, course_key_string):
"""
Course settings for dates and about pages
GET
html: get the page
json: get the CourseDetails model
PUT
json: update the Course and About xblocks through the CourseDetails model
"""
course_key = CourseKey.from_string(course_key_string)
credit_eligibility_enabled = settings.FEATURES.get('ENABLE_CREDIT_ELIGIBILITY', False)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
upload_asset_url = reverse_course_url('assets_handler', course_key)
# see if the ORG of this course can be attributed to a 'Microsite'. In that case, the
# course about page should be editable in Studio
marketing_site_enabled = microsite.get_value_for_org(
course_module.location.org,
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
about_page_editable = not marketing_site_enabled
# Note: GaGlobalCourseCreator can edit enrollment end date the course (#2150)
enrollment_end_editable = GlobalStaff().has_user(request.user) or not marketing_site_enabled or \
GaGlobalCourseCreatorRole().has_user(request.user)
short_description_editable = settings.FEATURES.get('EDITABLE_SHORT_DESCRIPTION', True)
self_paced_enabled = SelfPacedConfiguration.current().enabled
custom_logo_enabled = is_available(CUSTOM_LOGO_OPTION_KEY, course_key)
settings_context = {
'context_course': course_module,
'course_locator': course_key,
'lms_link_for_about_page': utils.get_lms_link_for_about_page(course_key),
'course_image_url': course_image_url(course_module),
'details_url': reverse_course_url('settings_handler', course_key),
'about_page_editable': about_page_editable,
'short_description_editable': short_description_editable,
'upload_asset_url': upload_asset_url,
'course_handler_url': reverse_course_url('course_handler', course_key),
'language_options': settings.ALL_LANGUAGES,
'credit_eligibility_enabled': credit_eligibility_enabled,
'is_credit_course': False,
'show_min_grade_warning': False,
'enrollment_end_editable': enrollment_end_editable,
'is_prerequisite_courses_enabled': is_prerequisite_courses_enabled(),
'is_entrance_exams_enabled': is_entrance_exams_enabled(),
'self_paced_enabled': self_paced_enabled,
'custom_logo_enabled': custom_logo_enabled,
'custom_logo_for_url': custom_logo_url(course_module),
'library_option': is_available(LIBRARY_OPTION_KEY, course_key),
'use_jwplayer': is_using_jwplayer_course(course_module),
}
if is_prerequisite_courses_enabled():
courses, in_process_course_actions = get_courses_accessible_to_user(request)
# exclude current course from the list of available courses
courses = [course for course in courses if course.id != course_key]
if courses:
courses = _remove_in_process_courses(courses, in_process_course_actions)
settings_context.update({'possible_pre_requisite_courses': courses})
if credit_eligibility_enabled:
if is_credit_course(course_key):
                    # get all credit eligibility requirements
credit_requirements = get_credit_requirements(course_key)
# pair together requirements with same 'namespace' values
paired_requirements = {}
for requirement in credit_requirements:
namespace = requirement.pop("namespace")
paired_requirements.setdefault(namespace, []).append(requirement)
# if 'minimum_grade_credit' of a course is not set or 0 then
# show warning message to course author.
                    show_min_grade_warning = course_module.minimum_grade_credit <= 0
settings_context.update(
{
'is_credit_course': True,
'credit_requirements': paired_requirements,
'show_min_grade_warning': show_min_grade_warning,
}
)
return render_to_response('settings.html', settings_context)
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
course_details = CourseDetails.fetch(course_key)
return JsonResponse(
course_details,
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
# For every other possible method type submitted by the caller...
else:
# if pre-requisite course feature is enabled set pre-requisite course
if is_prerequisite_courses_enabled():
prerequisite_course_keys = request.json.get('pre_requisite_courses', [])
if prerequisite_course_keys:
if not all(is_valid_course_key(course_key) for course_key in prerequisite_course_keys):
return JsonResponseBadRequest({"error": _("Invalid prerequisite course key")})
set_prerequisite_courses(course_key, prerequisite_course_keys)
# If the entrance exams feature has been enabled, we'll need to check for some
# feature-specific settings and handle them accordingly
# We have to be careful that we're only executing the following logic if we actually
# need to create or delete an entrance exam from the specified course
if is_entrance_exams_enabled():
course_entrance_exam_present = course_module.entrance_exam_enabled
entrance_exam_enabled = request.json.get('entrance_exam_enabled', '') == 'true'
ee_min_score_pct = request.json.get('entrance_exam_minimum_score_pct', None)
# If the entrance exam box on the settings screen has been checked...
if entrance_exam_enabled:
# Load the default minimum score threshold from settings, then try to override it
entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
if ee_min_score_pct:
entrance_exam_minimum_score_pct = float(ee_min_score_pct)
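                            # A whole-number value (e.g. 50) is read as a percentage and normalized to a 0-1 fraction.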
if entrance_exam_minimum_score_pct.is_integer():
entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100
entrance_exam_minimum_score_pct = unicode(entrance_exam_minimum_score_pct)
# If there's already an entrance exam defined, we'll update the existing one
if course_entrance_exam_present:
exam_data = {
'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct
}
update_entrance_exam(request, course_key, exam_data)
# If there's no entrance exam defined, we'll create a new one
else:
create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
# If the entrance exam box on the settings screen has been unchecked,
# and the course has an entrance exam attached...
elif not entrance_exam_enabled and course_entrance_exam_present:
delete_entrance_exam(request, course_key)
# Perform the normal update workflow for the CourseDetails model
return JsonResponse(
CourseDetails.update_from_json(course_key, request.json, request.user),
encoder=CourseSettingsEncoder
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def grading_handler(request, course_key_string, grader_index=None):
"""
Course Grading policy configuration
GET
html: get the page
json no grader_index: get the CourseGrading model (graceperiod, cutoffs, and graders)
json w/ grader_index: get the specific grader
PUT
json no grader_index: update the Course through the CourseGrading model
json w/ grader_index: create or update the specific grader (create if index out of range)
"""
course_key = CourseKey.from_string(course_key_string)
if request.user.is_staff:
certificate_url = 'http://' + str(settings.LMS_BASE) + '/certificate'
else:
studio_user_count = CourseAccessRole.objects.filter(user=request.user, course_id=course_key,
role__in=['instructor', 'staff']).count()
if studio_user_count:
certificate_url = 'http://' + str(settings.LMS_BASE) + '/certificate'
else:
certificate_url = ''
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
course_details = CourseGradingModel.fetch(course_key)
return render_to_response('settings_graders.html', {
'context_course': course_module,
'course_locator': course_key,
'course_details': course_details,
'grading_url': reverse_course_url('grading_handler', course_key),
'is_credit_course': is_credit_course(course_key),
'library_option': is_available(LIBRARY_OPTION_KEY, course_key),
'certificate_url': certificate_url,
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
if grader_index is None:
return JsonResponse(
CourseGradingModel.fetch(course_key),
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(CourseGradingModel.fetch_grader(course_key, grader_index))
elif request.method in ('POST', 'PUT'): # post or put, doesn't matter.
# update credit course requirements if 'minimum_grade_credit'
# field value is changed
if 'minimum_grade_credit' in request.json:
update_credit_course_requirements.delay(unicode(course_key))
# None implies update the whole model (cutoffs, graceperiod, and graders) not a specific grader
if grader_index is None:
return JsonResponse(
CourseGradingModel.update_from_json(course_key, request.json, request.user),
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(
CourseGradingModel.update_grader_from_json(course_key, request.json, request.user)
)
elif request.method == "DELETE" and grader_index is not None:
CourseGradingModel.delete_grader(course_key, grader_index, request.user)
return JsonResponse()
def _refresh_course_tabs(request, course_module):
"""
Automatically adds/removes tabs if changes to the course require them.
Raises:
InvalidTabsException: raised if there's a problem with the new version of the tabs.
"""
def update_tab(tabs, tab_type, tab_enabled):
"""
Adds or removes a course tab based upon whether it is enabled.
"""
tab_panel = {
"type": tab_type.type,
}
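        # Membership works because CourseTab instances compare equal to plain dicts with a matching type.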
has_tab = tab_panel in tabs
if tab_enabled and not has_tab:
tabs.append(CourseTab.from_json(tab_panel))
elif not tab_enabled and has_tab:
tabs.remove(tab_panel)
course_tabs = copy.copy(course_module.tabs)
# Additionally update any tabs that are provided by non-dynamic course views
for tab_type in CourseTabPluginManager.get_tab_types():
if not tab_type.is_dynamic and tab_type.is_default:
tab_enabled = tab_type.is_enabled(course_module, user=request.user)
update_tab(course_tabs, tab_type, tab_enabled)
CourseTabList.validate_tabs(course_tabs)
# Save the tabs into the course if they have been changed
if course_tabs != course_module.tabs:
course_module.tabs = course_tabs
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def advanced_settings_handler(request, course_key_string):
"""
Course settings configuration
GET
html: get the page
json: get the model
PUT, POST
json: update the Course's settings. The payload is a json rep of the
metadata dicts.
"""
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
return render_to_response('settings_advanced.html', {
'context_course': course_module,
'advanced_dict': CourseMetadata.fetch(course_module),
'advanced_settings_url': reverse_course_url('advanced_settings_handler', course_key),
'library_option': is_available(LIBRARY_OPTION_KEY, course_key)
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(CourseMetadata.fetch(course_module))
else:
try:
# validate data formats and update the course module.
# Note: don't update mongo yet, but wait until after any tabs are changed
is_valid, errors, updated_data = CourseMetadata.validate_and_update_from_json(
course_module,
request.json,
user=request.user,
)
if is_valid:
try:
# update the course tabs if required by any setting changes
_refresh_course_tabs(request, course_module)
except InvalidTabsException as err:
log.exception(err.message)
response_message = [
{
'message': _('An error occurred while trying to save your tabs'),
'model': {'display_name': _('Tabs Exception')}
}
]
return JsonResponseBadRequest(response_message)
# now update mongo
modulestore().update_item(course_module, request.user.id)
return JsonResponse(updated_data)
else:
return JsonResponseBadRequest(errors)
# Handle all errors that validation doesn't catch
except (TypeError, ValueError, InvalidTabsException) as err:
return HttpResponseBadRequest(
django.utils.html.escape(err.message),
content_type="text/plain"
)
class TextbookValidationError(Exception):
"An error thrown when a textbook input is invalid"
pass
def validate_textbooks_json(text):
"""
    Validate the given text as representing a list of PDF textbooks
"""
try:
textbooks = json.loads(text)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbooks, (list, tuple)):
raise TextbookValidationError("must be JSON list")
for textbook in textbooks:
validate_textbook_json(textbook)
# check specified IDs for uniqueness
all_ids = [textbook["id"] for textbook in textbooks if "id" in textbook]
unique_ids = set(all_ids)
if len(all_ids) > len(unique_ids):
raise TextbookValidationError("IDs must be unique")
return textbooks
def validate_textbook_json(textbook):
"""
    Validate the given textbook as representing a single PDF textbook
"""
if isinstance(textbook, basestring):
try:
textbook = json.loads(textbook)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbook, dict):
raise TextbookValidationError("must be JSON object")
if not textbook.get("tab_title"):
raise TextbookValidationError("must have tab_title")
tid = unicode(textbook.get("id", ""))
if tid and not tid[0].isdigit():
raise TextbookValidationError("textbook ID must start with a digit")
return textbook
def assign_textbook_id(textbook, used_ids=()):
"""
Return an ID that can be assigned to a textbook
and doesn't match the used_ids
"""
tid = Location.clean(textbook["tab_title"])
if not tid[0].isdigit():
# stick a random digit in front
tid = random.choice(string.digits) + tid
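    # Keep appending random lowercase letters until the id no longer collides with one already in use.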
while tid in used_ids:
# add a random ASCII character to the end
tid = tid + random.choice(string.ascii_lowercase)
return tid
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def textbooks_list_handler(request, course_key_string):
"""
A RESTful handler for textbook collections.
GET
html: return textbook list page (Backbone application)
json: return JSON representation of all textbooks in this course
POST
json: create a new textbook for this course
PUT
json: overwrite all textbooks in the course with the given list
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
if "application/json" not in request.META.get('HTTP_ACCEPT', 'text/html'):
# return HTML page
upload_asset_url = reverse_course_url('assets_handler', course_key)
textbook_url = reverse_course_url('textbooks_list_handler', course_key)
return render_to_response('textbooks.html', {
'context_course': course,
'textbooks': course.pdf_textbooks,
'upload_asset_url': upload_asset_url,
'textbook_url': textbook_url,
'library_option': is_available(LIBRARY_OPTION_KEY, course_key)
})
# from here on down, we know the client has requested JSON
if request.method == 'GET':
return JsonResponse(course.pdf_textbooks)
elif request.method == 'PUT':
try:
textbooks = validate_textbooks_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
tids = set(t["id"] for t in textbooks if "id" in t)
for textbook in textbooks:
if "id" not in textbook:
tid = assign_textbook_id(textbook, tids)
textbook["id"] = tid
tids.add(tid)
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append(CourseTab.load('pdf_textbooks'))
course.pdf_textbooks = textbooks
store.update_item(course, request.user.id)
return JsonResponse(course.pdf_textbooks)
elif request.method == 'POST':
# create a new textbook for the course
try:
textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if not textbook.get("id"):
tids = set(t["id"] for t in course.pdf_textbooks if "id" in t)
textbook["id"] = assign_textbook_id(textbook, tids)
existing = course.pdf_textbooks
existing.append(textbook)
course.pdf_textbooks = existing
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append(CourseTab.load('pdf_textbooks'))
store.update_item(course, request.user.id)
resp = JsonResponse(textbook, status=201)
resp["Location"] = reverse_course_url(
'textbooks_detail_handler',
course.id,
kwargs={'textbook_id': textbook["id"]}
)
return resp
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def textbooks_detail_handler(request, course_key_string, textbook_id):
"""
JSON API endpoint for manipulating a textbook via its internal ID.
Used by the Backbone application.
GET
json: return JSON representation of textbook
POST or PUT
json: update textbook based on provided information
DELETE
json: remove textbook
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
matching_id = [tb for tb in course_module.pdf_textbooks
if unicode(tb.get("id")) == unicode(textbook_id)]
if matching_id:
textbook = matching_id[0]
else:
textbook = None
if request.method == 'GET':
if not textbook:
return JsonResponse(status=404)
return JsonResponse(textbook)
        elif request.method in ('POST', 'PUT'):
            # POST and PUT are handled identically; django sometimes rewrites one into the other
try:
new_textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
new_textbook["id"] = textbook_id
if textbook:
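            # Replace the matching entry in place so the ordering of the textbook list is preserved.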
i = course_module.pdf_textbooks.index(textbook)
new_textbooks = course_module.pdf_textbooks[0:i]
new_textbooks.append(new_textbook)
new_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = new_textbooks
else:
course_module.pdf_textbooks.append(new_textbook)
store.update_item(course_module, request.user.id)
return JsonResponse(new_textbook, status=201)
elif request.method == 'DELETE':
if not textbook:
return JsonResponse(status=404)
i = course_module.pdf_textbooks.index(textbook)
remaining_textbooks = course_module.pdf_textbooks[0:i]
remaining_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = remaining_textbooks
store.update_item(course_module, request.user.id)
return JsonResponse()
def remove_content_or_experiment_group(request, store, course, configuration, group_configuration_id, group_id=None):
"""
Remove content group or experiment group configuration only if it's not in use.
"""
configuration_index = course.user_partitions.index(configuration)
if configuration.scheme.name == RANDOM_SCHEME:
usages = GroupConfiguration.get_content_experiment_usage_info(store, course)
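        # The usage map is keyed by configuration id, so membership means some unit still references this experiment.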
used = int(group_configuration_id) in usages
if used:
return JsonResponse(
{"error": _("This group configuration is in use and cannot be deleted.")},
status=400
)
course.user_partitions.pop(configuration_index)
elif configuration.scheme.name == COHORT_SCHEME:
if not group_id:
return JsonResponse(status=404)
group_id = int(group_id)
usages = GroupConfiguration.get_content_groups_usage_info(store, course)
used = group_id in usages
if used:
return JsonResponse(
{"error": _("This content group is in use and cannot be deleted.")},
status=400
)
matching_groups = [group for group in configuration.groups if group.id == group_id]
if matching_groups:
group_index = configuration.groups.index(matching_groups[0])
configuration.groups.pop(group_index)
else:
return JsonResponse(status=404)
course.user_partitions[configuration_index] = configuration
store.update_item(course, request.user.id)
return JsonResponse(status=204)
@require_http_methods(("GET", "POST"))
@login_required
@ensure_csrf_cookie
def group_configurations_list_handler(request, course_key_string):
"""
A RESTful handler for Group Configurations
GET
html: return Group Configurations list page (Backbone application)
POST
json: create new group configuration
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
group_configuration_url = reverse_course_url('group_configurations_list_handler', course_key)
course_outline_url = reverse_course_url('course_handler', course_key)
should_show_experiment_groups = are_content_experiments_enabled(course)
if should_show_experiment_groups:
experiment_group_configurations = GroupConfiguration.get_split_test_partitions_with_usage(store, course)
else:
experiment_group_configurations = None
content_group_configuration = GroupConfiguration.get_or_create_content_group(store, course)
return render_to_response('group_configurations.html', {
'context_course': course,
'group_configuration_url': group_configuration_url,
'course_outline_url': course_outline_url,
'experiment_group_configurations': experiment_group_configurations,
'should_show_experiment_groups': should_show_experiment_groups,
'content_group_configuration': content_group_configuration,
'library_option': is_available(LIBRARY_OPTION_KEY, course_key)
})
elif "application/json" in request.META.get('HTTP_ACCEPT'):
if request.method == 'POST':
# create a new group configuration for the course
try:
new_configuration = GroupConfiguration(request.body, course).get_user_partition()
except GroupConfigurationsValidationError as err:
return JsonResponse({"error": err.message}, status=400)
course.user_partitions.append(new_configuration)
response = JsonResponse(new_configuration.to_json(), status=201)
response["Location"] = reverse_course_url(
'group_configurations_detail_handler',
course.id,
kwargs={'group_configuration_id': new_configuration.id}
)
store.update_item(course, request.user.id)
return response
else:
return HttpResponse(status=406)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def group_configurations_detail_handler(request, course_key_string, group_configuration_id, group_id=None):
"""
JSON API endpoint for manipulating a group configuration via its internal ID.
Used by the Backbone application.
POST or PUT
json: update group configuration based on provided information
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
matching_id = [p for p in course.user_partitions
if unicode(p.id) == unicode(group_configuration_id)]
if matching_id:
configuration = matching_id[0]
else:
configuration = None
        if request.method in ('POST', 'PUT'):
            # POST and PUT are handled identically; django sometimes rewrites one into the other
try:
new_configuration = GroupConfiguration(request.body, course, group_configuration_id).get_user_partition()
except GroupConfigurationsValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if configuration:
index = course.user_partitions.index(configuration)
course.user_partitions[index] = new_configuration
else:
course.user_partitions.append(new_configuration)
store.update_item(course, request.user.id)
configuration = GroupConfiguration.update_usage_info(store, course, new_configuration)
return JsonResponse(configuration, status=201)
elif request.method == "DELETE":
if not configuration:
return JsonResponse(status=404)
return remove_content_or_experiment_group(
request=request,
store=store,
course=course,
configuration=configuration,
group_configuration_id=group_configuration_id,
group_id=group_id
)
def are_content_experiments_enabled(course):
"""
Returns True if content experiments have been enabled for the course.
"""
return (
SPLIT_TEST_COMPONENT_TYPE in ADVANCED_COMPONENT_TYPES and
SPLIT_TEST_COMPONENT_TYPE in course.advanced_modules
)
def _get_course_creator_status(user):
"""
Helper method for returning the course creator status for a particular user,
taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.
If the user passed in has not previously visited the index page, it will be
added with status 'unrequested' if the course creator group is in use.
"""
# Note: GaGlobalCourseCreator can create a course (#2150)
if user.is_staff or GaGlobalCourseCreatorRole().has_user(user):
course_creator_status = 'granted'
elif settings.FEATURES.get('DISABLE_COURSE_CREATION', False):
course_creator_status = 'disallowed_for_this_site'
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
course_creator_status = get_course_creator_status(user)
if course_creator_status is None:
# User not grandfathered in as an existing user, has not previously visited the dashboard page.
# Add the user to the course creator admin table with status 'unrequested'.
add_user_with_status_unrequested(user)
course_creator_status = get_course_creator_status(user)
else:
course_creator_status = 'granted'
return course_creator_status
| nttks/edx-platform | cms/djangoapps/contentstore/views/course.py | Python | agpl-3.0 | 75,601 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.receive_notification'
db.add_column('truekko_userprofile', 'receive_notification',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.receive_notification'
db.delete_column('truekko_userprofile', 'receive_notification')
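    # Frozen ORM definitions that South uses to reconstruct the model state at this point in history.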
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'truekko.channel': {
'Meta': {'object_name': 'Channel'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500'}),
'wall': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channels'", 'null': 'True', 'to': "orm['truekko.Wall']"})
},
'truekko.commitment': {
'Meta': {'ordering': "['-date']", 'object_name': 'Commitment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'WAI'", 'max_length': '3'}),
'swap': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commitments'", 'to': "orm['truekko.Swap']"}),
'user_from': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'my_commitments'", 'to': "orm['auth.User']"}),
'user_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'commitments_to_me'", 'to': "orm['auth.User']"})
},
'truekko.denounce': {
'Meta': {'ordering': "['-date']", 'object_name': 'Denounce'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['truekko.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'PEN'", 'max_length': '3'}),
'user_from': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dennounces_from'", 'to': "orm['auth.User']"}),
'user_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'dennounces_to'", 'to': "orm['auth.User']"})
},
'truekko.follow': {
'Meta': {'object_name': 'Follow'},
'follower': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followings'", 'to': "orm['auth.User']"}),
'following': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'truekko.group': {
'Meta': {'object_name': 'Group'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['truekko.Channel']"}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '800', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '500'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'web': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'truekko.item': {
'Meta': {'ordering': "['-pub_date']", 'object_name': 'Item'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'offer_or_demand': ('django.db.models.fields.CharField', [], {'default': "'OFF'", 'max_length': '3'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.IntegerField', [], {}),
'price_type': ('django.db.models.fields.CharField', [], {'default': "'ETK'", 'max_length': '20'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'IT'", 'max_length': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['auth.User']"})
},
'truekko.itemtagged': {
'Meta': {'object_name': 'ItemTagged'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['truekko.Item']"}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['truekko.Tag']"})
},
'truekko.membership': {
'Meta': {'object_name': 'Membership'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['truekko.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'truekko.swap': {
'Meta': {'ordering': "['-date']", 'object_name': 'Swap'},
'credits_from': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'credits_to': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'done_msg': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'swap_mode': ('django.db.models.fields.CharField', [], {'default': "'NON'", 'max_length': '3'}),
'user_from': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'swaps_from'", 'to': "orm['auth.User']"}),
'user_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'swaps_to'", 'to': "orm['auth.User']"})
},
'truekko.swapcomment': {
'Meta': {'object_name': 'SwapComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'swap': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['truekko.Swap']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'truekko.swapitems': {
'Meta': {'object_name': 'SwapItems'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['truekko.Item']"}),
'swap': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['truekko.Swap']"})
},
'truekko.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'truekko.transfer': {
'Meta': {'object_name': 'Transfer'},
'concept': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'credits': ('django.db.models.fields.PositiveIntegerField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'group_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'transfer_form'", 'null': 'True', 'to': "orm['truekko.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_from': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'transfer_from'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'transfer_to'", 'to': "orm['auth.User']"})
},
'truekko.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'credits': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '300', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'default': "'Unlocated'", 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed'", 'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'rating_score': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'rating_votes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'receive_notification': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'web': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'truekko.wall': {
'Meta': {'object_name': 'Wall'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'walls'", 'null': 'True', 'to': "orm['truekko.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'walls'", 'null': 'True', 'to': "orm['auth.User']"})
},
'truekko.wallmessage': {
'Meta': {'ordering': "['-date']", 'object_name': 'WallMessage'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'msg': ('django.db.models.fields.TextField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'childs'", 'null': 'True', 'to': "orm['truekko.WallMessage']"}),
'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['auth.User']"}),
'wall': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['truekko.Wall']"})
}
}
complete_apps = ['truekko'] | wadobo/etruekko | etruekko/truekko/migrations/0013_auto__add_field_userprofile_receive_notification.py | Python | agpl-3.0 | 15,657 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
import clv_insured_ext
import category
import clv_insured
import clv_insured_card
import clv_tag
import res_partner
import wkf
import history
| CLVsol/odoo_addons | clv_insured_ext/__init__.py | Python | agpl-3.0 | 1,546 |
# Settings for running unittests. These are optimized for speed.
from .settings_devel import * # noqa
COMPRESS_ENABLED = False
COMPRESS_PRECOMPILERS = []
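# Pointing MIGRATION_MODULES at a module that does not exist is a common trick to skip migrations in tests.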
MIGRATION_MODULES = {
"weight": "healthmonitor.migrations_not_used_in_tests"
}
| jorgenschaefer/healthmonitor | healthmonitor/settings_devel_fast.py | Python | agpl-3.0 | 240 |
# Author: Drone
import web
from app.helpers import utils
from app.helpers import formatting
projectName = 'Remote Function Trainer'
listLimit = 40
# connect to database
db = web.database(dbn='mysql', db='rft', user='root', passwd='1234')
t = db.transaction()
#t.commit()
# debug error messages and the autoreloader are only enabled in development
web.config.debug = False
# in development template caching is set to False
cache = False
# template global functions
globals = utils.get_all_functions(formatting)
# set global base template
view = web.template.render('app/views', cache=cache, globals=globals)
# in production the internal errors are emailed to us
web.config.email_errors = '' | gcobos/rft | config.py | Python | agpl-3.0 | 673 |
# -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Account Analytic Warehouse",
"summary": "Add analytic in stock_warehouse",
"version": "9.0.1.0.0",
"category": "Accounting",
"website": "https://odoo-community.org/",
"author": "<Deysy Mascorro>, Odoo Community Association (OCA)",
"license": "AGPL-3",
"application": False,
"installable": True,
"external_dependencies": {
"python": [],
"bin": [],
},
"depends": [
"base",
"account",
"stock"
],
"data": [
"views/stock_warehouse_view.xml",
"views/stock_location_view.xml",
],
"demo": [
],
"qweb": [
]
}
| Gebesa-Dev/Addons-gebesa | stock_warehouse_analytic_id/__openerp__.py | Python | agpl-3.0 | 763 |
"""Customized autocomplete widgets"""
# Standard Library
import re
# Third Party
from dal import autocomplete
# MuckRock
from muckrock.jurisdiction.models import Jurisdiction
class MRSelect2Mixin:
"""MuckRock Model Select2 mixin"""
def __init__(self, *args, **kwargs):
attrs = {
"data-html": True,
"data-dropdown-css-class": "select2-dropdown",
"data-width": "100%",
}
attrs.update(kwargs.pop("attrs", {}))
super().__init__(*args, attrs=attrs, **kwargs)
def filter_choices_to_render(self, selected_choices):
"""Filter out non-numeric choices"""
selected_choices = [c for c in selected_choices if c.isdecimal()]
return super().filter_choices_to_render(selected_choices)
class ModelSelect2(MRSelect2Mixin, autocomplete.ModelSelect2):
"""MuckRock Model Select2"""
class ModelSelect2Multiple(MRSelect2Mixin, autocomplete.ModelSelect2Multiple):
"""MuckRock Model Select2"""
class Select2MultipleSI(MRSelect2Mixin, autocomplete.Select2Multiple):
"""MuckRock Select2 for state inclusive jurisdiction autocomplete"""
value_format = re.compile(r"\d+-(True|False)")
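    # Values are of the form "<jurisdiction pk>-<include_local>",
    # e.g. "12-True" selects jurisdiction 12 with "include local" set.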
def filter_choices_to_render(self, selected_choices):
"""Replace self.choices with selected_choices."""
self.choices = []
for choice in selected_choices:
if not self.value_format.match(choice):
continue
pk, include_local = choice.split("-")
jurisdiction = Jurisdiction.objects.get(pk=pk)
label = str(jurisdiction)
if include_local == "True":
label += " (include local)"
self.choices.append((choice, label))
| MuckRock/muckrock | muckrock/core/autocomplete.py | Python | agpl-3.0 | 1,730 |
# -*- coding: utf-'8' "-*-"
import base64
try:
import simplejson as json
except ImportError:
import json
import logging
import urlparse
import werkzeug.urls
import urllib2
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_paypal.controllers.main import PaypalController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
from openerp import SUPERUSER_ID
_logger = logging.getLogger(__name__)
class AcquirerPaypal(osv.Model):
_inherit = 'payment.acquirer'
def _get_paypal_urls(self, cr, uid, environment, context=None):
""" Paypal URLS """
if environment == 'prod':
return {
'paypal_form_url': 'https://www.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.paypal.com/v1/oauth2/token',
}
else:
return {
'paypal_form_url': 'https://www.sandbox.paypal.com/cgi-bin/webscr',
'paypal_rest_url': 'https://api.sandbox.paypal.com/v1/oauth2/token',
}
def _get_providers(self, cr, uid, context=None):
providers = super(AcquirerPaypal, self)._get_providers(cr, uid, context=context)
providers.append(['paypal', 'Paypal'])
return providers
_columns = {
'paypal_email_account': fields.char('Paypal Email ID', required_if_provider='paypal', groups='base.group_user'),
'paypal_seller_account': fields.char(
'Paypal Merchant ID', groups='base.group_user',
help='The Merchant ID is used to ensure communications coming from Paypal are valid and secured.'),
'paypal_use_ipn': fields.boolean('Use IPN', help='Paypal Instant Payment Notification', groups='base.group_user'),
# Server 2 server
'paypal_api_enabled': fields.boolean('Use Rest API'),
'paypal_api_username': fields.char('Rest API Username', groups='base.group_user'),
'paypal_api_password': fields.char('Rest API Password', groups='base.group_user'),
'paypal_api_access_token': fields.char('Access Token', groups='base.group_user'),
'paypal_api_access_token_validity': fields.datetime('Access Token Validity', groups='base.group_user'),
}
_defaults = {
'paypal_use_ipn': True,
'fees_active': False,
'fees_dom_fixed': 0.35,
'fees_dom_var': 3.4,
'fees_int_fixed': 0.35,
'fees_int_var': 3.9,
'paypal_api_enabled': False,
}
def _migrate_paypal_account(self, cr, uid, context=None):
""" COMPLETE ME """
cr.execute('SELECT id, paypal_account FROM res_company')
res = cr.fetchall()
for (company_id, company_paypal_account) in res:
if company_paypal_account:
company_paypal_ids = self.search(cr, uid, [('company_id', '=', company_id), ('provider', '=', 'paypal')], limit=1, context=context)
if company_paypal_ids:
self.write(cr, uid, company_paypal_ids, {'paypal_email_account': company_paypal_account}, context=context)
else:
paypal_view = self.pool['ir.model.data'].get_object(cr, uid, 'payment_paypal', 'paypal_acquirer_button')
self.create(cr, uid, {
'name': 'Paypal',
'provider': 'paypal',
'paypal_email_account': company_paypal_account,
'view_template_id': paypal_view.id,
}, context=context)
return True
def paypal_compute_fees(self, cr, uid, id, amount, currency_id, country_id, context=None):
""" Compute paypal fees.
:param float amount: the amount to pay
:param integer country_id: an ID of a res.country, or None. This is
the customer's country, to be compared to
the acquirer company country.
:return float fees: computed fees
"""
acquirer = self.browse(cr, uid, id, context=context)
if not acquirer.fees_active:
return 0.0
country = self.pool['res.country'].browse(cr, uid, country_id, context=context)
if country and acquirer.company_id.country_id.id == country.id:
percentage = acquirer.fees_dom_var
fixed = acquirer.fees_dom_fixed
else:
percentage = acquirer.fees_int_var
fixed = acquirer.fees_int_fixed
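        # Gross up the fee: Paypal charges its percentage and fixed fee on the
        # total received (amount + fees), so solve
        #     fees = (amount + fees) * percentage / 100 + fixed
        # for fees; that yields the closed form below.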
fees = (percentage / 100.0 * amount + fixed ) / (1 - percentage / 100.0)
return fees
def paypal_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, SUPERUSER_ID, 'web.base.url')
acquirer = self.browse(cr, uid, id, context=context)
paypal_tx_values = dict(tx_values)
paypal_tx_values.update({
'cmd': '_xclick',
'business': acquirer.paypal_email_account,
'item_name': '%s: %s' % (acquirer.company_id.name, tx_values['reference']),
'item_number': tx_values['reference'],
'amount': tx_values['amount'],
'currency_code': tx_values['currency'] and tx_values['currency'].name or '',
'address1': partner_values['address'],
'city': partner_values['city'],
'country': partner_values['country'] and partner_values['country'].code or '',
'state': partner_values['state'] and (partner_values['state'].code or partner_values['state'].name) or '',
'email': partner_values['email'],
'zip': partner_values['zip'],
'first_name': partner_values['first_name'],
'last_name': partner_values['last_name'],
'return': '%s' % urlparse.urljoin(base_url, PaypalController._return_url),
'notify_url': '%s' % urlparse.urljoin(base_url, PaypalController._notify_url),
'cancel_return': '%s' % urlparse.urljoin(base_url, PaypalController._cancel_url),
})
if acquirer.fees_active:
paypal_tx_values['handling'] = '%.2f' % paypal_tx_values.pop('fees', 0.0)
if paypal_tx_values.get('return_url'):
paypal_tx_values['custom'] = json.dumps({'return_url': '%s' % paypal_tx_values.pop('return_url')})
return partner_values, paypal_tx_values
def paypal_get_form_action_url(self, cr, uid, id, context=None):
acquirer = self.browse(cr, uid, id, context=context)
return self._get_paypal_urls(cr, uid, acquirer.environment, context=context)['paypal_form_url']
def _paypal_s2s_get_access_token(self, cr, uid, ids, context=None):
"""
Note: see # see http://stackoverflow.com/questions/2407126/python-urllib2-basic-auth-problem
for explanation why we use Authorization header instead of urllib2
password manager
"""
res = dict.fromkeys(ids, False)
        parameters = werkzeug.urls.url_encode({'grant_type': 'client_credentials'})
for acquirer in self.browse(cr, uid, ids, context=context):
tx_url = self._get_paypal_urls(cr, uid, acquirer.environment)['paypal_rest_url']
request = urllib2.Request(tx_url, parameters)
# add other headers (https://developer.paypal.com/webapps/developer/docs/integration/direct/make-your-first-call/)
request.add_header('Accept', 'application/json')
            # 'tools.config.defaultLang' is undefined here ('tools' was never imported
            # and no such config attribute exists); use a fixed locale instead
            request.add_header('Accept-Language', 'en_US')
# add authorization header
base64string = base64.encodestring('%s:%s' % (
acquirer.paypal_api_username,
acquirer.paypal_api_password)
).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
request = urllib2.urlopen(request)
result = request.read()
res[acquirer.id] = json.loads(result).get('access_token')
request.close()
return res
class TxPaypal(osv.Model):
_inherit = 'payment.transaction'
_columns = {
'paypal_txn_id': fields.char('Transaction ID'),
'paypal_txn_type': fields.char('Transaction type'),
}
# --------------------------------------------------
# FORM RELATED METHODS
# --------------------------------------------------
def _paypal_form_get_tx_from_data(self, cr, uid, data, context=None):
reference, txn_id = data.get('item_number'), data.get('txn_id')
if not reference or not txn_id:
error_msg = 'Paypal: received data with missing reference (%s) or txn_id (%s)' % (reference, txn_id)
_logger.error(error_msg)
raise ValidationError(error_msg)
# find tx -> @TDENOTE use txn_id ?
tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
if not tx_ids or len(tx_ids) > 1:
error_msg = 'Paypal: received data for reference %s' % (reference)
if not tx_ids:
error_msg += '; no order found'
else:
                error_msg += '; multiple orders found'
_logger.error(error_msg)
raise ValidationError(error_msg)
return self.browse(cr, uid, tx_ids[0], context=context)
def _paypal_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
invalid_parameters = []
_logger.info('Received a notification from Paypal with IPN version %s', data.get('notify_version'))
        if data.get('test_ipn'):
            _logger.warning(
                'Received a notification from Paypal using sandbox'
            )
        # TODO: txn_id should be false at draft, set afterwards, and verified with txn details
if tx.acquirer_reference and data.get('txn_id') != tx.acquirer_reference:
invalid_parameters.append(('txn_id', data.get('txn_id'), tx.acquirer_reference))
        # check what was bought
if float_compare(float(data.get('mc_gross', '0.0')), (tx.amount + tx.fees), 2) != 0:
invalid_parameters.append(('mc_gross', data.get('mc_gross'), '%.2f' % tx.amount)) # mc_gross is amount + fees
if data.get('mc_currency') != tx.currency_id.name:
invalid_parameters.append(('mc_currency', data.get('mc_currency'), tx.currency_id.name))
if 'handling_amount' in data and float_compare(float(data.get('handling_amount')), tx.fees, 2) != 0:
invalid_parameters.append(('handling_amount', data.get('handling_amount'), tx.fees))
# check buyer
if tx.partner_reference and data.get('payer_id') != tx.partner_reference:
invalid_parameters.append(('payer_id', data.get('payer_id'), tx.partner_reference))
# check seller
if data.get('receiver_id') and tx.acquirer_id.paypal_seller_account and data['receiver_id'] != tx.acquirer_id.paypal_seller_account:
invalid_parameters.append(('receiver_id', data.get('receiver_id'), tx.acquirer_id.paypal_seller_account))
if not data.get('receiver_id') or not tx.acquirer_id.paypal_seller_account:
# Check receiver_email only if receiver_id was not checked.
            # In Paypal it is possible to configure a receiver_email that differs from the business email (the login email).
            # In Odoo there is only one field for the Paypal email: the business email, so a receiver_email different from
            # the business email cannot be set. Therefore, if you want such a configuration in Paypal, you must fill in the
            # Merchant ID in the Paypal payment acquirer in Odoo, so that the check is performed on that variable instead of
            # the receiver_email.
if data.get('receiver_email') != tx.acquirer_id.paypal_email_account:
invalid_parameters.append(('receiver_email', data.get('receiver_email'), tx.acquirer_id.paypal_email_account))
return invalid_parameters
    def _paypal_form_validate(self, cr, uid, tx, data, context=None):
        status = data.get('payment_status')
        # keep the incoming 'data' dict intact: the values written to the
        # transaction go into a separate dict so later data.get() lookups work
        res = {
            'acquirer_reference': data.get('txn_id'),
            'paypal_txn_type': data.get('payment_type'),
            'partner_reference': data.get('payer_id')
        }
        if status in ['Completed', 'Processed']:
            _logger.info('Validated Paypal payment for tx %s: set as done' % (tx.reference))
            res.update(state='done', date_validate=data.get('payment_date', fields.datetime.now()))
            return tx.write(res)
        elif status in ['Pending', 'Expired']:
            _logger.info('Received notification for Paypal payment %s: set as pending' % (tx.reference))
            res.update(state='pending', state_message=data.get('pending_reason', ''))
            return tx.write(res)
        else:
            error = 'Received unrecognized status for Paypal payment %s: %s, set as error' % (tx.reference, status)
            _logger.info(error)
            res.update(state='error', state_message=error)
            return tx.write(res)
# --------------------------------------------------
# SERVER2SERVER RELATED METHODS
# --------------------------------------------------
def _paypal_try_url(self, request, tries=3, context=None):
""" Try to contact Paypal. Due to some issues, internal service errors
seem to be quite frequent. Several tries are done before considering
the communication as failed.
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
done, res = False, None
while (not done and tries):
try:
res = urllib2.urlopen(request)
done = True
except urllib2.HTTPError as e:
res = e.read()
e.close()
if tries and res and json.loads(res)['name'] == 'INTERNAL_SERVICE_ERROR':
_logger.warning('Failed contacting Paypal, retrying (%s remaining)' % tries)
tries = tries - 1
if not res:
pass
# raise openerp.exceptions.
result = res.read()
res.close()
return result
def _paypal_s2s_send(self, cr, uid, values, cc_values, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
tx_id = self.create(cr, uid, values, context=context)
tx = self.browse(cr, uid, tx_id, context=context)
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
data = {
'intent': 'sale',
'transactions': [{
'amount': {
'total': '%.2f' % tx.amount,
'currency': tx.currency_id.name,
},
'description': tx.reference,
}]
}
if cc_values:
data['payer'] = {
'payment_method': 'credit_card',
'funding_instruments': [{
'credit_card': {
'number': cc_values['number'],
'type': cc_values['brand'],
'expire_month': cc_values['expiry_mm'],
'expire_year': cc_values['expiry_yy'],
'cvv2': cc_values['cvc'],
'first_name': tx.partner_name,
'last_name': tx.partner_name,
'billing_address': {
'line1': tx.partner_address,
'city': tx.partner_city,
'country_code': tx.partner_country_id.code,
'postal_code': tx.partner_zip,
}
}
}]
}
else:
# TODO: complete redirect URLs
data['redirect_urls'] = {
# 'return_url': 'http://example.com/your_redirect_url/',
# 'cancel_url': 'http://example.com/your_cancel_url/',
            }
data['payer'] = {
'payment_method': 'paypal',
}
data = json.dumps(data)
request = urllib2.Request('https://api.sandbox.paypal.com/v1/payments/payment', data, headers)
result = self._paypal_try_url(request, tries=3, context=context)
return (tx_id, result)
def _paypal_s2s_get_invalid_parameters(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
invalid_parameters = []
return invalid_parameters
def _paypal_s2s_validate(self, cr, uid, tx, data, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
values = json.loads(data)
status = values.get('state')
if status in ['approved']:
_logger.info('Validated Paypal s2s payment for tx %s: set as done' % (tx.reference))
tx.write({
'state': 'done',
                'date_validate': values.get('update_time', fields.datetime.now()),
'paypal_txn_id': values['id'],
})
return True
elif status in ['pending', 'expired']:
_logger.info('Received notification for Paypal s2s payment %s: set as pending' % (tx.reference))
tx.write({
'state': 'pending',
# 'state_message': data.get('pending_reason', ''),
'paypal_txn_id': values['id'],
})
return True
else:
error = 'Received unrecognized status for Paypal s2s payment %s: %s, set as error' % (tx.reference, status)
_logger.info(error)
tx.write({
'state': 'error',
# 'state_message': error,
'paypal_txn_id': values['id'],
})
return False
def _paypal_s2s_get_tx_status(self, cr, uid, tx, context=None):
"""
.. versionadded:: pre-v8 saas-3
.. warning::
Experimental code. You should not use it before OpenERP v8 official
release.
"""
# TDETODO: check tx.paypal_txn_id is set
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer %s' % tx.acquirer_id._paypal_s2s_get_access_token()[tx.acquirer_id.id],
}
url = 'https://api.sandbox.paypal.com/v1/payments/payment/%s' % (tx.paypal_txn_id)
request = urllib2.Request(url, headers=headers)
data = self._paypal_try_url(request, tries=3, context=context)
return self.s2s_feedback(cr, uid, tx.id, data, context=context)
| funkring/fdoo | addons/payment_paypal/models/paypal.py | Python | agpl-3.0 | 19,377 |
"""Tests for plugin.py."""
import ckanext.dictionary.plugin as plugin
def test_plugin():
pass | cmuphillycapstone/ckanext-dictionary | ckanext/dictionary/tests/test_plugin.py | Python | agpl-3.0 | 98 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Pupil.birthday'
db.add_column('gsaudit_pupil', 'birthday',
self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2012, 6, 17, 0, 0)),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Pupil.birthday'
db.delete_column('gsaudit_pupil', 'birthday')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'gsaudit.audit': {
'Meta': {'object_name': 'Audit'},
'assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.TeachingAssignment']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'written_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gsaudit.auditskill': {
'Meta': {'object_name': 'AuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
'gsaudit.grade': {
'Meta': {'object_name': 'Grade'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.School']"})
},
'gsaudit.gradeparticipant': {
'Meta': {'object_name': 'GradeParticipant'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"})
},
'gsaudit.pupil': {
'Meta': {'ordering': "('first_name', 'last_name')", 'object_name': 'Pupil'},
'birthday': ('django.db.models.fields.DateField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {})
},
'gsaudit.pupilauditskill': {
'Meta': {'object_name': 'PupilAuditSkill'},
'audit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Audit']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'diagnosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"}),
'rating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'written_exam': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'gsaudit.pupiltainfo': {
'Meta': {'unique_together': "(('pupil', 'teaching_assignment'),)", 'object_name': 'PupilTAInfo'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'pupil': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Pupil']"}),
'teaching_assignment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.TeachingAssignment']"}),
'written_exam_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'gsaudit.school': {
'Meta': {'object_name': 'School'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'gsaudit.skill': {
'Meta': {'object_name': 'Skill'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['gsaudit.Skill']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'gsaudit.subject': {
'Meta': {'object_name': 'Subject'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'gsaudit.teacher': {
'Meta': {'object_name': 'Teacher', '_ormbases': ['auth.User']},
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.School']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'gsaudit.teachingassignment': {
'Meta': {'object_name': 'TeachingAssignment'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'grade': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Grade']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jsondata': ('jsonfield.JSONField', [], {'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Skill']"}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Subject']"}),
'teacher': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['gsaudit.Teacher']"})
}
}
complete_apps = ['gsaudit']
| schnapptack/gskompetenzen | features/gsaudit/migrations/0011_auto__add_field_pupil_birthday.py | Python | agpl-3.0 | 12,951 |
import urlparse
import sys, urllib
import xbmc, xbmcgui, xbmcaddon, xbmcplugin
import urlresolver
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
_addon = xbmcaddon.Addon()
_icon = _addon.getAddonInfo('icon')
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
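# e.g. build_url({'mode': 'play', 'playlink': link}) yields
# "<base_url>?mode=play&playlink=<url-encoded link>"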
def resolve_url(url):
    duration = 7500  # in milliseconds
message = "Cannot Play URL"
stream_url = urlresolver.HostedMediaFile(url=url).resolve()
# If urlresolver returns false then the video url was not resolved.
if not stream_url:
dialog = xbmcgui.Dialog()
dialog.notification("URL Resolver Error", message, xbmcgui.NOTIFICATION_INFO, duration)
return False
else:
return stream_url
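# Flow: each list item stores its raw page/video link; play_video() below
# resolves it with resolve_url() and only hands a playable stream to Kodi
# via setResolvedUrl when resolution succeeds.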
def play_video(path):
"""
Play a video by the provided path.
:param path: str
"""
# Create a playable item with a path to play.
play_item = xbmcgui.ListItem(path=path)
vid_url = play_item.getfilename()
stream_url = resolve_url(vid_url)
if stream_url:
play_item.setPath(stream_url)
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item)
# addon kicks in
mode = args.get('mode', None)
if mode is None:
video_play_url = "http://www.vidsplay.com/wp-content/uploads/2017/04/alligator.mp4"
url = build_url({'mode' :'play', 'playlink' : video_play_url})
li = xbmcgui.ListItem('Play Video 1', iconImage='DefaultVideo.png')
li.setProperty('IsPlayable' , 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
video_play_url = "https://www.youtube.com/watch?v=J9d9UrK0Jsw"
url = build_url({'mode' :'play', 'playlink' : video_play_url})
li = xbmcgui.ListItem('Play Video 2', iconImage='DefaultVideo.png')
li.setProperty('IsPlayable' , 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
video_play_url = "www.reddit.com"
url = build_url({'mode' :'play', 'playlink' : video_play_url})
li = xbmcgui.ListItem('Play Video 3', iconImage='DefaultVideo.png')
li.setProperty('IsPlayable' , 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
xbmcplugin.endOfDirectory(addon_handle)
elif mode[0] == 'play':
final_link = args['playlink'][0]
play_video(final_link)
| munchycool/forthelulz | plugin.video.v1d30play/playvideo.py | Python | agpl-3.0 | 2,398 |
""" Mixins for setting up particular course structures (such as split tests or cohorted content) """
from datetime import datetime
from pytz import UTC
from openedx.core.djangoapps.course_groups.models import CourseUserGroupPartitionGroup
from openedx.core.djangoapps.course_groups.tests.helpers import CohortFactory
from openedx.core.djangoapps.user_api.tests.factories import UserCourseTagFactory
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.partitions.partitions import UserPartition, Group
from student.tests.factories import CourseEnrollmentFactory, UserFactory
class ContentGroupTestCase(ModuleStoreTestCase):
"""
Sets up discussion modules visible to content groups 'Alpha' and
'Beta', as well as a module visible to all students. Creates a
staff user, users with access to Alpha/Beta (by way of cohorts),
and a non-cohorted user with no special access.
"""
def setUp(self):
super(ContentGroupTestCase, self).setUp()
self.course = CourseFactory.create(
org='org', number='number', run='run',
# This test needs to use a course that has already started --
# discussion topics only show up if the course has already started,
# and the default start date for courses is Jan 1, 2030.
start=datetime(2012, 2, 3, tzinfo=UTC),
user_partitions=[
UserPartition(
0,
'Content Group Configuration',
'',
[Group(1, 'Alpha'), Group(2, 'Beta')],
scheme_id='cohort'
)
],
grading_policy={
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"passing_grade": 0,
"weight": 1.0
}]
},
cohort_config={'cohorted': True},
discussion_topics={}
)
self.staff_user = UserFactory.create(is_staff=True)
self.alpha_user = UserFactory.create()
self.beta_user = UserFactory.create()
self.non_cohorted_user = UserFactory.create()
for user in [self.staff_user, self.alpha_user, self.beta_user, self.non_cohorted_user]:
CourseEnrollmentFactory.create(user=user, course_id=self.course.id)
alpha_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Alpha',
users=[self.alpha_user]
)
beta_cohort = CohortFactory(
course_id=self.course.id,
name='Cohort Beta',
users=[self.beta_user]
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=alpha_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[0].id
)
CourseUserGroupPartitionGroup.objects.create(
course_user_group=beta_cohort,
partition_id=self.course.user_partitions[0].id,
group_id=self.course.user_partitions[0].groups[1].id
)
self.alpha_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='alpha_group_discussion',
discussion_target='Visible to Alpha',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[0].id]}
)
self.beta_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='beta_group_discussion',
discussion_target='Visible to Beta',
group_access={self.course.user_partitions[0].id: [self.course.user_partitions[0].groups[1].id]}
)
self.global_module = ItemFactory.create(
parent_location=self.course.location,
category='discussion',
discussion_id='global_group_discussion',
discussion_target='Visible to Everyone'
)
self.course = self.store.get_item(self.course.location)
class TestConditionalContent(ModuleStoreTestCase):
"""
Construct a course with graded problems that exist within a split test.
"""
TEST_SECTION_NAME = 'Problem'
def setUp(self):
"""
Set up a course with graded problems within a split test.
Course hierarchy is as follows (modeled after how split tests
are created in studio):
-> course
-> chapter
-> sequential (graded)
-> vertical
-> split_test
-> vertical (Group A)
-> problem
-> vertical (Group B)
-> problem
"""
super(TestConditionalContent, self).setUp()
# Create user partitions
self.user_partition_group_a = 0
self.user_partition_group_b = 1
self.partition = UserPartition(
0,
'first_partition',
'First Partition',
[
Group(self.user_partition_group_a, 'Group A'),
Group(self.user_partition_group_b, 'Group B')
]
)
# Create course with group configurations and grading policy
self.course = CourseFactory.create(
user_partitions=[self.partition],
grading_policy={
"GRADER": [{
"type": "Homework",
"min_count": 1,
"drop_count": 0,
"short_label": "HW",
"passing_grade": 0,
"weight": 1.0
}]
}
)
chapter = ItemFactory.create(parent_location=self.course.location,
display_name='Chapter')
# add a sequence to the course to which the problems can be added
self.problem_section = ItemFactory.create(parent_location=chapter.location,
category='sequential',
metadata={'graded': True, 'format': 'Homework'},
display_name=self.TEST_SECTION_NAME)
# Create users and partition them
self.student_a = UserFactory.create(username='student_a', email='student_a@example.com')
CourseEnrollmentFactory.create(user=self.student_a, course_id=self.course.id)
self.student_b = UserFactory.create(username='student_b', email='student_b@example.com')
CourseEnrollmentFactory.create(user=self.student_b, course_id=self.course.id)
UserCourseTagFactory(
user=self.student_a,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(self.user_partition_group_a)
)
UserCourseTagFactory(
user=self.student_b,
course_id=self.course.id,
key='xblock.partition_service.partition_{0}'.format(self.partition.id), # pylint: disable=no-member
value=str(self.user_partition_group_b)
)
# Create a vertical to contain our split test
problem_vertical = ItemFactory.create(
parent_location=self.problem_section.location,
category='vertical',
display_name='Problem Unit'
)
# Create the split test and child vertical containers
vertical_a_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_a')
vertical_b_url = self.course.id.make_usage_key('vertical', 'split_test_vertical_b')
self.split_test = ItemFactory.create(
parent_location=problem_vertical.location,
category='split_test',
display_name='Split Test',
user_partition_id=self.partition.id, # pylint: disable=no-member
group_id_to_child={str(index): url for index, url in enumerate([vertical_a_url, vertical_b_url])}
)
self.vertical_a = ItemFactory.create(
parent_location=self.split_test.location,
category='vertical',
display_name='Group A problem container',
location=vertical_a_url
)
self.vertical_b = ItemFactory.create(
parent_location=self.split_test.location,
category='vertical',
display_name='Group B problem container',
location=vertical_b_url
)
| martynovp/edx-platform | openedx/core/djangoapps/util/testing.py | Python | agpl-3.0 | 8,831 |
import json
import os
import os.path
import types
from django.conf import settings
from models import FSExpirations
if settings.DJFS['type'] == 'osfs':
from fs.osfs import OSFS
elif settings.DJFS['type'] == 's3fs':
from fs.s3fs import S3FS
from boto.s3.connection import S3Connection
from boto.s3.key import Key
s3conn = S3Connection()
else:
raise AttributeError("Bad filesystem: "+str(settings.DJFS['type']))
def get_filesystem(namespace):
''' Returns a pyfilesystem for static module storage.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
'''
if settings.DJFS['type'] == 'osfs':
return get_osfs( namespace )
elif settings.DJFS['type'] == 's3fs':
return get_s3fs( namespace )
else:
raise AttributeError("Bad filesystem: "+str(settings.DJFS['type']))
def expire_objects():
''' Remove all obsolete objects from the file systems. Untested. '''
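    # objects are sorted by module so each namespace's filesystem is
    # constructed only once in the loop below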
objects = sorted(FSExpirations.expired(), key=lambda x:x.module)
fs = None
module = None
for o in objects:
if module != o.module:
module = o.module
fs = get_filesystem(module)
if fs.exists(o.filename):
fs.remove(o.filename)
o.delete()
def patch_fs(fs, namespace, url_method):
''' Patch a filesystem object to add two methods:
get_url returns a URL for a resource stored on that filesystem. It takes two parameters:
filename: Which resource
timeout: How long that resource is available for
expire sets a timeout on how long the system should keep the resource. It takes four parameters:
filename: Which resource
seconds: How long we will keep it
days: (optional) More user-friendly if a while
expires: (optional) boolean; if set to False, we keep the resource forever.
Without calling this method, we provide no guarantees on how long resources will stick around.
'''
def expire(self, filename, seconds, days=0, expires = True):
''' Set the lifespan of a file on the filesystem.
filename: Name of file
            expires: False means the file will never be removed
seconds and days give time to expiration.
'''
FSExpirations.create_expiration(namespace, filename, seconds, days=days, expires = expires)
fs.expire = types.MethodType(expire, fs)
fs.get_url = types.MethodType(url_method, fs)
return fs
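# Illustrative usage sketch (not part of the original module; the namespace
# and filenames are made up):
#
#     fs = get_filesystem('reports')
#     with fs.open('summary.csv', 'wb') as f:
#         f.write(csv_bytes)
#     fs.expire('summary.csv', 0, days=7)          # auto-remove after a week
#     url = fs.get_url('summary.csv', timeout=60)  # short-lived download URL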
def get_osfs(namespace):
''' Helper method to get_filesystem for a file system on disk '''
full_path = os.path.join(settings.DJFS['directory_root'], namespace)
if not os.path.exists(full_path):
os.makedirs(full_path)
osfs = OSFS(full_path)
osfs = patch_fs(osfs, namespace, lambda self, filename, timeout=0:os.path.join(settings.DJFS['url_root'], namespace, filename))
return osfs
def get_s3fs(namespace):
''' Helper method to get_filesystem for a file system on S3 '''
fullpath = namespace
if 'prefix' in settings.DJFS:
fullpath = os.path.join(settings.DJFS['prefix'], fullpath)
s3fs = S3FS(settings.DJFS['bucket'], fullpath)
def get_s3_url(self, filename, timeout=60):
global s3conn
try:
            return s3conn.generate_url(timeout, 'GET', bucket=settings.DJFS['bucket'], key=filename)
        except:  # If the connection has timed out, reconnect and retry
            s3conn = S3Connection()
            return s3conn.generate_url(timeout, 'GET', bucket=settings.DJFS['bucket'], key=filename)
s3fs = patch_fs(s3fs, namespace, get_s3_url)
return s3fs
| edx/insights | src/edinsights/modulefs/modulefs.py | Python | agpl-3.0 | 3,765 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import datetime
import logging
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
STATUS_CODES_FAIL,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
WgerTestCase
)
from wger.manager.models import (
Schedule,
ScheduleStep,
Workout
)
from wger.utils.helpers import make_token
logger = logging.getLogger(__name__)
class ScheduleShareButtonTestCase(WgerTestCase):
"""
Test that the share button is correctly displayed and hidden
"""
def test_share_button(self):
workout = Workout.objects.get(pk=2)
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertTrue(response.context['show_shariff'])
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
class ScheduleAccessTestCase(WgerTestCase):
"""
Test accessing the workout page
"""
def test_access_shared(self):
"""
Test accessing the URL of a shared workout
"""
workout = Schedule.objects.get(pk=2)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_access_not_shared(self):
"""
Test accessing the URL of a private workout
"""
workout = Schedule.objects.get(pk=1)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
class ScheduleRepresentationTestCase(WgerTestCase):
"""
Test the representation of a model
"""
def test_representation(self):
"""
Test that the representation of an object is correct
"""
self.assertEqual("{0}".format(Schedule.objects.get(pk=1)),
'my cool schedule that i found on the internet')
class CreateScheduleTestCase(WgerAddTestCase):
"""
Tests adding a schedule
"""
object_class = Schedule
url = 'manager:schedule:add'
user_success = 'test'
user_fail = False
data = {'name': 'My cool schedule',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
class DeleteScheduleTestCase(WgerDeleteTestCase):
"""
Tests deleting a schedule
"""
object_class = Schedule
url = 'manager:schedule:delete'
pk = 1
user_success = 'test'
user_fail = 'admin'
class EditScheduleTestCase(WgerEditTestCase):
"""
Tests editing a schedule
"""
object_class = Schedule
url = 'manager:schedule:edit'
pk = 3
data = {'name': 'An updated name',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
class ScheduleTestCase(WgerTestCase):
"""
Other tests
"""
def schedule_detail_page(self):
"""
Helper function
"""
response = self.client.get(reverse('manager:schedule:view', kwargs={'pk': 2}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This schedule is a loop')
schedule = Schedule.objects.get(pk=2)
schedule.is_loop = False
schedule.save()
response = self.client.get(reverse('manager:schedule:view', kwargs={'pk': 2}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'This schedule is a loop')
def test_schedule_detail_page_owner(self):
"""
Tests the schedule detail page as the owning user
"""
self.user_login()
self.schedule_detail_page()
def test_schedule_overview(self):
"""
Tests the schedule overview
"""
self.user_login()
response = self.client.get(reverse('manager:schedule:overview'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['schedules']), 3)
self.assertTrue(response.context['schedules'][0].is_active)
schedule = Schedule.objects.get(pk=4)
schedule.is_active = False
schedule.save()
response = self.client.get(reverse('manager:schedule:overview'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['schedules']), 3)
for i in range(0, 3):
self.assertFalse(response.context['schedules'][i].is_active)
def test_schedule_active(self):
"""
Tests that only one schedule can be active at a time (per user)
"""
def get_schedules():
schedule1 = Schedule.objects.get(pk=2)
schedule2 = Schedule.objects.get(pk=3)
schedule3 = Schedule.objects.get(pk=4)
return (schedule1, schedule2, schedule3)
self.user_login()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertTrue(schedule3.is_active)
schedule1.is_active = True
schedule1.save()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertTrue(schedule1.is_active)
self.assertFalse(schedule2.is_active)
self.assertFalse(schedule3.is_active)
schedule2.is_active = True
schedule2.save()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertFalse(schedule1.is_active)
self.assertTrue(schedule2.is_active)
self.assertFalse(schedule3.is_active)
def start_schedule(self, fail=False):
"""
Helper function
"""
schedule = Schedule.objects.get(pk=2)
self.assertFalse(schedule.is_active)
self.assertNotEqual(schedule.start_date, datetime.date.today())
response = self.client.get(reverse('manager:schedule:start', kwargs={'pk': 2}))
schedule = Schedule.objects.get(pk=2)
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
self.assertFalse(schedule.is_active)
self.assertNotEqual(schedule.start_date, datetime.date.today())
else:
self.assertEqual(response.status_code, 302)
self.assertTrue(schedule.is_active)
self.assertEqual(schedule.start_date, datetime.date.today())
def test_start_schedule_owner(self):
"""
Tests starting a schedule as the owning user
"""
self.user_login()
self.start_schedule()
def test_start_schedule_other(self):
"""
Tests starting a schedule as a different user
"""
self.user_login('test')
self.start_schedule(fail=True)
def test_start_schedule_anonymous(self):
"""
Tests starting a schedule as a logged out user
"""
self.start_schedule(fail=True)
class ScheduleEndDateTestCase(WgerTestCase):
"""
Test the schedule's get_end_date method
"""
def test_loop_schedule(self):
"""
Loop schedules have no end date
"""
schedule = Schedule.objects.get(pk=2)
self.assertTrue(schedule.is_loop)
self.assertFalse(schedule.get_end_date())
def test_calculate(self):
"""
Test the actual calculation
Steps: 3, 5 and 2 weeks, starting on the 2013-04-21
"""
schedule = Schedule.objects.get(pk=2)
schedule.is_loop = False
schedule.save()
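        # 3 + 5 + 2 = 10 weeks = 70 days; 2013-04-21 + 70 days = 2013-06-30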
self.assertEqual(schedule.get_end_date(), datetime.date(2013, 6, 30))
def test_empty_schedule(self):
"""
Test the end date with an empty schedule
"""
schedule = Schedule.objects.get(pk=3)
self.assertEqual(schedule.get_end_date(), schedule.start_date)
class ScheduleModelTestCase(WgerTestCase):
"""
Tests the model methods
"""
def delete_objects(self, user):
"""
Helper function
"""
Workout.objects.filter(user=user).delete()
Schedule.objects.filter(user=user).delete()
def create_schedule(self, user, start_date=datetime.date.today(), is_loop=False):
"""
Helper function
"""
schedule = Schedule()
schedule.user = user
schedule.name = 'temp'
schedule.is_active = True
schedule.start_date = start_date
schedule.is_loop = is_loop
schedule.save()
return schedule
def create_workout(self, user):
"""
Helper function
"""
workout = Workout()
workout.user = user
workout.save()
return workout
def test_get_workout_steps_test_1(self):
"""
Test with no workouts and no schedule steps
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
schedule = self.create_schedule(user)
self.assertFalse(schedule.get_current_scheduled_workout())
def test_get_workout_steps_test_2(self):
"""
Test with one schedule step
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
schedule = self.create_schedule(user)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.save()
self.assertEqual(schedule.get_current_scheduled_workout().workout, workout)
def test_get_workout_steps_test_3(self):
"""
Test with 3 steps
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
start_date = datetime.date.today() - datetime.timedelta(weeks=4)
schedule = self.create_schedule(user, start_date=start_date)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.order = 1
step.save()
workout2 = self.create_workout(user)
step2 = ScheduleStep()
step2.schedule = schedule
step2.workout = workout2
step2.duration = 1
step2.order = 2
step2.save()
workout3 = self.create_workout(user)
step3 = ScheduleStep()
step3.schedule = schedule
step3.workout = workout3
step3.duration = 2
step3.order = 3
step3.save()
self.assertEqual(schedule.get_current_scheduled_workout().workout, workout2)
def test_get_workout_steps_test_4(self):
"""
Test with 3 steps. Start is too far in the past, schedule ist not a loop
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
start_date = datetime.date.today() - datetime.timedelta(weeks=7)
schedule = self.create_schedule(user, start_date=start_date)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.order = 1
step.save()
workout2 = self.create_workout(user)
step2 = ScheduleStep()
step2.schedule = schedule
step2.workout = workout2
step2.duration = 1
step2.order = 2
step2.save()
workout3 = self.create_workout(user)
step3 = ScheduleStep()
step3.schedule = schedule
step3.workout = workout3
step3.duration = 2
step3.order = 3
step3.save()
self.assertFalse(schedule.get_current_scheduled_workout())
def test_get_workout_steps_test_5(self):
"""
Test with 3 steps. Start is too far in the past but schedule is a loop
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
start_date = datetime.date.today() - datetime.timedelta(weeks=7)
schedule = self.create_schedule(user, start_date=start_date, is_loop=True)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.order = 1
step.save()
workout2 = self.create_workout(user)
step2 = ScheduleStep()
step2.schedule = schedule
step2.workout = workout2
step2.duration = 1
step2.order = 2
step2.save()
workout3 = self.create_workout(user)
step3 = ScheduleStep()
step3.schedule = schedule
step3.workout = workout3
step3.duration = 2
step3.order = 3
step3.save()
self.assertTrue(schedule.get_current_scheduled_workout().workout, workout)
class SchedulePdfExportTestCase(WgerTestCase):
"""
Test exporting a schedule as a pdf
"""
def export_pdf_token(self, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf using tokens
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 1,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-1-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
# Wrong or expired token
uid = 'MQ'
token = '3xv-57ef74923091fe7f186e'
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 1,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_pdf(self, fail=False, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf
"""
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 1}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-1-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def export_pdf_with_comments(self, fail=False, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf, with exercise coments
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 3,
'images': 0,
'comments': 1,
'uidb64': uid,
'token': token}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-3-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def export_pdf_with_images(self, fail=False, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf, with exercise images
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 3,
'images': 1,
'comments': 0,
'uidb64': uid,
'token': token}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-3-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def export_pdf_with_images_and_comments(self, fail=False, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf, with images and comments
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 3,
'images': 1,
'comments': 1,
'uidb64': uid,
'token': token}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-3-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def test_export_pdf_log_anonymous(self):
"""
Tests exporting a workout as a pdf as an anonymous user
"""
self.export_pdf(fail=True)
self.export_pdf_token()
def test_export_pdf_log_owner(self):
"""
Tests exporting a workout as a pdf as the owner user
"""
self.user_login('test')
self.export_pdf(fail=False)
self.export_pdf_token()
def test_export_pdf_log_other(self):
"""
Tests exporting a workout as a pdf as a logged user not owning the data
"""
self.user_login('admin')
self.export_pdf(fail=True)
self.export_pdf_token()
def test_export_pdf_log_with_comments(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with comments
"""
self.user_login('test')
self.export_pdf_with_comments(fail=False)
self.export_pdf_token()
def test_export_pdf_log_with_images(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with images
"""
self.user_login('test')
self.export_pdf_with_images(fail=False)
self.export_pdf_token()
def test_export_pdf_log_with_images_and_comments(self, fail=False):
"""
        Tests exporting a workout as a pdf as the owner user with images and comments
"""
self.user_login('test')
self.export_pdf_with_images_and_comments(fail=False)
self.export_pdf_token()
# #####TABLE#####
def test_export_pdf_table_anonymous(self):
"""
Tests exporting a workout as a pdf as an anonymous user
"""
self.export_pdf(fail=True, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_owner(self):
"""
Tests exporting a workout as a pdf as the owner user
"""
self.user_login('test')
self.export_pdf(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_other(self):
"""
Tests exporting a workout as a pdf as a logged user not owning the data
"""
self.user_login('admin')
self.export_pdf(fail=True, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_with_comments(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with comments
"""
self.user_login('test')
self.export_pdf_with_comments(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_with_images(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with images
"""
self.user_login('test')
self.export_pdf_with_images(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_with_images_and_comments(self, fail=False):
"""
        Tests exporting a workout as a pdf as the owner user with images and comments
"""
self.user_login('test')
self.export_pdf_with_images_and_comments(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
class ScheduleApiTestCase(api_base_test.ApiBaseResourceTestCase):
"""
Tests the schedule overview resource
"""
pk = 1
resource = Schedule
private_resource = True
data = {'name': 'An updated name',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
| rolandgeider/wger | wger/manager/tests/test_schedule.py | Python | agpl-3.0 | 23,910 |
# pylint: disable-all
# flake8: noqa
import sys
sys.path.append("..")
from todopagoconnector import TodoPagoConnector
from SendAuthorizeRequestData import SendAuthorizeRequestData
import unittest
from unittest import TestCase
if sys.version_info[0] >= 3:
from unittest.mock import patch, Mock
else:
from mock import patch, Mock, MagicMock
class SendAuthorizeRequestTest(TestCase):
@patch('todopagoconnector.TodoPagoConnector')
def test_get_credentials_ok(self, MockTodoPagoConnector):
j_header_http = {
'Authorization': 'TODOPAGO f3d8b72c94ab4a06be2ef7c95490f7d3'
}
MTPConnector = MockTodoPagoConnector(j_header_http, "test")
instanceSARData = SendAuthorizeRequestData()
MTPConnector.sendAuthorize.return_value = instanceSARData.send_authorize_request_ok_response()
responseSAR = MTPConnector.sendAuthorize(
instanceSARData.get_options_SAR_comercio_params(),
instanceSARData.get_options_SAR_operation_params())
self.assertEqual(responseSAR['StatusCode'], -1)
@patch('todopagoconnector.TodoPagoConnector')
def test_get_credentials_fail(self, MockTodoPagoConnector):
j_header_http = {
'Authorization': 'TODOPAGO f3d8b72c94ab4a06be2ef7c95490f7d3'
}
MTPConnector = MockTodoPagoConnector(j_header_http, "test")
instanceSAR = SendAuthorizeRequestData()
MTPConnector.sendAuthorize.return_value = instanceSAR.send_authorize_request_fail_response()
responseSAR = MTPConnector.sendAuthorize(
instanceSAR.get_options_SAR_comercio_params(),
instanceSAR.get_options_SAR_operation_params())
        self.assertNotEqual(responseSAR['StatusCode'], -1)
@patch('todopagoconnector.TodoPagoConnector')
def test_get_credentials_702(self, MockTodoPagoConnector):
j_header_http = {
'Authorization': 'TODOPAGO f3d8b72c94ab4a06be2ef7c95490f7d3'
}
MTPConnector = MockTodoPagoConnector(j_header_http, "test")
instanceSAR = SendAuthorizeRequestData()
MTPConnector.sendAuthorize.return_value = instanceSAR.send_authorize_request_702_response()
responseSAR = MTPConnector.sendAuthorize(
instanceSAR.get_options_SAR_comercio_params(),
instanceSAR.get_options_SAR_operation_params())
        self.assertNotEqual(responseSAR['StatusCode'], -1)
if __name__ == '__main__':
unittest.main()
| ingadhoc/website | payment_todopago/todopago/test/SendAuthorizeRequestTest.py | Python | agpl-3.0 | 2,473 |
# -*- coding: utf-8 -*-
# Copyright(C) 2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import datetime
import re
import unicodedata
from decimal import Decimal, InvalidOperation
from itertools import islice
from collections import Iterator
from dateutil.parser import parse as parse_date
from weboob.capabilities.base import empty
from weboob.tools.compat import basestring
from weboob.exceptions import ParseError
from weboob.browser.url import URL
from weboob.tools.log import getLogger, DEBUG_FILTERS
class NoDefault(object):
def __repr__(self):
return 'NO_DEFAULT'
_NO_DEFAULT = NoDefault()
__all__ = ['FilterError', 'ColumnNotFound', 'RegexpError', 'ItemNotFound',
'Filter', 'Base', 'Env', 'TableCell', 'RawText',
'CleanText', 'Lower', 'CleanDecimal', 'Field', 'Regexp', 'Map',
'DateTime', 'Date', 'Time', 'DateGuesser', 'Duration',
'MultiFilter', 'CombineDate', 'Format', 'Join', 'Type',
'BrowserURL', 'Async', 'AsyncLoad']
class FilterError(ParseError):
pass
class ColumnNotFound(FilterError):
pass
class RegexpError(FilterError):
pass
class ItemNotFound(FilterError):
pass
class _Filter(object):
_creation_counter = 0
def __init__(self, default=_NO_DEFAULT):
self._key = None
self._obj = None
self.default = default
self._creation_counter = _Filter._creation_counter
_Filter._creation_counter += 1
def __or__(self, o):
self.default = o
return self
def __and__(self, o):
if isinstance(o, type) and issubclass(o, _Filter):
o = o()
o.selector = self
return o
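    # Illustrative operator shortcuts (a sketch, not part of the original docs):
    #   CleanText('//p') | u''    -> u'' becomes the default value on failure
    #   CleanText('//p') & Lower  -> feeds the result into a Lower() filter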
def default_or_raise(self, exception):
if self.default is not _NO_DEFAULT:
return self.default
else:
raise exception
def __str__(self):
return self.__class__.__name__
def debug(*args):
"""
A decorator function to provide some debug information
in Filters.
It prints by default the name of the Filter and the input value.
"""
    def wrapper(function):
def print_debug(self, value):
logger = getLogger('b2filters')
result = ''
outputvalue = value
if isinstance(value, list):
from lxml import etree
outputvalue = ''
first = True
for element in value:
if first:
first = False
else:
outputvalue += ', '
if isinstance(element, etree.ElementBase):
outputvalue += "%s" % etree.tostring(element, encoding=unicode)
else:
outputvalue += "%r" % element
if self._obj is not None:
result += "%s" % self._obj._random_id
if self._key is not None:
result += ".%s" % self._key
name = str(self)
result += " %s(%r" % (name, outputvalue)
for arg in self.__dict__:
if arg.startswith('_') or arg == u"selector":
continue
if arg == u'default' and getattr(self, arg) == _NO_DEFAULT:
continue
result += ", %s=%r" % (arg, getattr(self, arg))
result += u')'
logger.log(DEBUG_FILTERS, result)
res = function(self, value)
return res
return print_debug
    return wrapper
class Filter(_Filter):
"""
    Class used to filter on an HTML element given as call parameter, to return
    matching elements.
    Filters can be chained, so the parameter supplied to the constructor can be
    either an XPath selector string or another filter applied before it.
>>> from lxml.html import etree
>>> f = CleanDecimal(CleanText('//p'), replace_dots=True)
>>> f(etree.fromstring('<html><body><p>blah: <span>229,90</span></p></body></html>'))
Decimal('229.90')
"""
def __init__(self, selector=None, default=_NO_DEFAULT):
super(Filter, self).__init__(default=default)
self.selector = selector
@classmethod
def select(cls, selector, item, obj=None, key=None):
if isinstance(selector, basestring):
return item.xpath(selector)
elif isinstance(selector, _Filter):
selector._key = key
selector._obj = obj
return selector(item)
elif callable(selector):
return selector(item)
else:
return selector
def __call__(self, item):
return self.filter(self.select(self.selector, item, key=self._key, obj=self._obj))
@debug()
def filter(self, value):
"""
        This method has to be overridden by child classes.
"""
raise NotImplementedError()
class _Selector(Filter):
def filter(self, elements):
if elements is not None:
return elements
else:
return self.default_or_raise(ParseError('Element %r not found' % self.selector))
class AsyncLoad(Filter):
def __call__(self, item):
link = self.select(self.selector, item, key=self._key, obj=self._obj)
return item.page.browser.async_open(link)
class Async(_Filter):
def __init__(self, name, selector=None):
super(Async, self).__init__()
self.selector = selector
self.name = name
def __and__(self, o):
if isinstance(o, type) and issubclass(o, _Filter):
o = o()
self.selector = o
return self
def __call__(self, item):
result = item.loaders[self.name].result()
assert result.page is not None, 'The loaded url %s hasn\'t been matched by an URL object' % result.url
return self.selector(result.page.doc)
class Base(Filter):
"""
Change the base element used in filters.
>>> Base(Env('header'), CleanText('./h1')) # doctest: +SKIP
"""
def __call__(self, item):
base = self.select(self.base, item, obj=self._obj, key=self._key)
return self.selector(base)
def __init__(self, base, selector=None, default=_NO_DEFAULT):
super(Base, self).__init__(selector, default)
self.base = base
class Env(_Filter):
"""
Filter to get environment value of the item.
It is used for example to get page parameters, or when there is a parse()
method on ItemElement.
"""
def __init__(self, name, default=_NO_DEFAULT):
super(Env, self).__init__(default)
self.name = name
def __call__(self, item):
try:
return item.env[self.name]
except KeyError:
return self.default_or_raise(ParseError('Environment variable %s not found' % self.name))
class TableCell(_Filter):
"""
    Used with TableElement, it gets the cell value from its name.
For example:
>>> from weboob.capabilities.bank import Transaction
>>> from weboob.browser.elements import TableElement, ItemElement
>>> class table(TableElement):
... head_xpath = '//table/thead/th'
... item_xpath = '//table/tbody/tr'
... col_date = u'Date'
... col_label = [u'Name', u'Label']
... class item(ItemElement):
... klass = Transaction
... obj_date = Date(TableCell('date'))
... obj_label = CleanText(TableCell('label'))
...
"""
def __init__(self, *names, **kwargs):
super(TableCell, self).__init__(**kwargs)
self.names = names
def __call__(self, item):
for name in self.names:
idx = item.parent.get_colnum(name)
if idx is not None:
return item.xpath('./td[%s]' % (idx + 1))
return self.default_or_raise(ColumnNotFound('Unable to find column %s' % ' or '.join(self.names)))
class RawText(Filter):
@debug()
def filter(self, el):
if isinstance(el, (tuple, list)):
return u' '.join([self.filter(e) for e in el])
if el.text is None:
return self.default
else:
return unicode(el.text)
class CleanText(Filter):
"""
Get a cleaned text from an element.
It first replaces all tabs and multiple spaces
(including newlines if ``newlines`` is True)
to one space and strips the result string.
The result is coerced into unicode, and optionally normalized
according to the ``normalize`` argument.
Then it replaces all symbols given in the ``symbols`` argument.
>>> CleanText().filter('coucou ')
u'coucou'
>>> CleanText().filter(u'coucou\xa0coucou')
u'coucou coucou'
>>> CleanText(newlines=True).filter(u'coucou\\r\\n coucou ')
u'coucou coucou'
>>> CleanText(newlines=False).filter(u'coucou\\r\\n coucou ')
u'coucou\\ncoucou'
"""
def __init__(self, selector=None, symbols='', replace=[], children=True, newlines=True, normalize='NFC', **kwargs):
super(CleanText, self).__init__(selector, **kwargs)
self.symbols = symbols
self.toreplace = replace
self.children = children
self.newlines = newlines
self.normalize = normalize
@debug()
def filter(self, txt):
if isinstance(txt, (tuple, list)):
txt = u' '.join([self.clean(item, children=self.children) for item in txt])
txt = self.clean(txt, self.children, self.newlines, self.normalize)
txt = self.remove(txt, self.symbols)
txt = self.replace(txt, self.toreplace)
# ensure it didn't become str by mistake
return unicode(txt)
@classmethod
def clean(cls, txt, children=True, newlines=True, normalize='NFC'):
if not isinstance(txt, basestring):
if children:
txt = [t.strip() for t in txt.itertext()]
else:
txt = [txt.text.strip()]
txt = u' '.join(txt) # 'foo bar'
if newlines:
txt = re.compile(u'\s+', flags=re.UNICODE).sub(u' ', txt) # 'foo bar'
else:
# normalize newlines and clean what is inside
txt = '\n'.join([cls.clean(l) for l in txt.splitlines()])
txt = txt.strip()
# lxml under Python 2 returns str instead of unicode if it is pure ASCII
txt = unicode(txt)
# normalize to a standard Unicode form
if normalize:
txt = unicodedata.normalize(normalize, txt)
return txt
@classmethod
def remove(cls, txt, symbols):
for symbol in symbols:
txt = txt.replace(symbol, '')
return txt.strip()
@classmethod
def replace(cls, txt, replace):
for before, after in replace:
txt = txt.replace(before, after)
return txt
class Lower(CleanText):
@debug()
def filter(self, txt):
txt = super(Lower, self).filter(txt)
return txt.lower()
class CleanDecimal(CleanText):
"""
Get a cleaned Decimal value from an element.
replace_dots is False by default. A dot is interpreted as a decimal separator.
If replace_dots is set to True, we remove all the dots. The ',' is used as decimal
separator (often useful for French values)
If replace_dots is a tuple, the first element will be used as the thousands separator,
and the second as the decimal separator.
See http://en.wikipedia.org/wiki/Thousands_separator#Examples_of_use
For example, for the UK style (as in 1,234,567.89):
>>> CleanDecimal('./td[1]', replace_dots=(',', '.')) # doctest: +SKIP
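
    Two concrete examples (the values are illustrative):

    >>> CleanDecimal(replace_dots=(',', '.')).filter(u'1,234,567.89')
    Decimal('1234567.89')
    >>> CleanDecimal(replace_dots=True).filter(u'1.234,56')
    Decimal('1234.56')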
"""
def __init__(self, selector=None, replace_dots=False, sign=None, default=_NO_DEFAULT):
super(CleanDecimal, self).__init__(selector, default=default)
self.replace_dots = replace_dots
self.sign = sign
@debug()
def filter(self, text):
if empty(text):
return self.default_or_raise(ParseError('Unable to parse %r' % text))
original_text = text = super(CleanDecimal, self).filter(text)
if self.replace_dots:
if type(self.replace_dots) is tuple:
thousands_sep, decimal_sep = self.replace_dots
else:
thousands_sep, decimal_sep = '.', ','
text = text.replace(thousands_sep, '').replace(decimal_sep, '.')
try:
v = Decimal(re.sub(r'[^\d\-\.]', '', text))
if self.sign:
v *= self.sign(original_text)
return v
except InvalidOperation as e:
return self.default_or_raise(e)
class Slugify(Filter):
@debug()
def filter(self, label):
label = re.sub(r'[^A-Za-z0-9]', ' ', label.lower()).strip()
label = re.sub(r'\s+', '-', label)
return label
class Type(Filter):
"""
Get a cleaned value of any type from an element text.
The type_func can be any callable (class, function, etc.).
By default an empty string will not be parsed but it can be changed
by specifying minlen=False. Otherwise, a minimal length can be specified.
>>> Type(CleanText('./td[1]'), type=int) # doctest: +SKIP
>>> Type(type=int).filter('42')
42
>>> Type(type=int, default='NaN').filter('')
'NaN'
>>> Type(type=str, minlen=False, default='a').filter('')
''
>>> Type(type=str, minlen=0, default='a').filter('')
'a'
"""
def __init__(self, selector=None, type=None, minlen=0, default=_NO_DEFAULT):
super(Type, self).__init__(selector, default=default)
self.type_func = type
self.minlen = minlen
@debug()
def filter(self, txt):
if empty(txt):
return self.default_or_raise(ParseError('Unable to parse %r' % txt))
if self.minlen is not False and len(txt) <= self.minlen:
return self.default_or_raise(ParseError('Unable to parse %r' % txt))
try:
return self.type_func(txt)
except ValueError as e:
return self.default_or_raise(ParseError('Unable to parse %r: %s' % (txt, e)))
class Field(_Filter):
"""
Get the attribute of object.
"""
def __init__(self, name):
super(Field, self).__init__()
self.name = name
def __call__(self, item):
return item.use_selector(getattr(item, 'obj_%s' % self.name), key=self._key)
# Based on nth from https://docs.python.org/2/library/itertools.html
def nth(iterable, n, default=None):
"Returns the nth item or a default value, n can be negative, or '*' for all"
if n == '*':
return iterable
if n < 0:
iterable = reversed(list(iterable))
n = abs(n) - 1
return next(islice(iterable, n, None), default)
def ordinal(n):
"To have some readable debug information: '*' => all, 0 => 1st, 1 => 2nd..."
if n == '*':
return 'all'
i = abs(n)
n = n - 1 if n < 0 else n + 1
return str(n) + ('th' if i > 2 else ['st', 'nd', 'rd'][i])
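# Illustrative behaviour of the two helpers above (a sketch):
#   nth('abcde', 1) == 'b'; nth('abcde', -1) == 'e'; nth('abcde', '*') is 'abcde'
#   ordinal(0) == '1st'; ordinal(1) == '2nd'; ordinal(3) == '4th'; ordinal('*') == 'all'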
class Regexp(Filter):
r"""
Apply a regex.
>>> from lxml.html import etree
>>> doc = etree.fromstring('<html><body><p>Date: <span>13/08/1988</span></p></body></html>')
>>> Regexp(CleanText('//p'), r'Date: (\d+)/(\d+)/(\d+)', '\\3-\\2-\\1')(doc)
u'1988-08-13'
>>> (Regexp(CleanText('//body'), r'(\d+)', nth=1))(doc)
u'08'
>>> (Regexp(CleanText('//body'), r'(\d+)', nth=-1))(doc)
u'1988'
>>> (Regexp(CleanText('//body'), r'(\d+)', template='[\\1]', nth='*'))(doc)
[u'[13]', u'[08]', u'[1988]']
"""
def __init__(self, selector=None, pattern=None, template=None, nth=0, flags=0, default=_NO_DEFAULT):
super(Regexp, self).__init__(selector, default=default)
assert pattern is not None
self.pattern = pattern
self._regex = re.compile(pattern, flags)
self.template = template
self.nth = nth
def expand(self, m):
if self.template is None:
return next(g for g in m.groups() if g is not None)
return self.template(m) if callable(self.template) else m.expand(self.template)
@debug()
def filter(self, txt):
        if isinstance(txt, (tuple, list)):
            # Join the text of each matched element (plain strings are kept).
            txt = u' '.join([t.strip() if isinstance(t, basestring) else u' '.join(s.strip() for s in t.itertext()) for t in txt])
m = self._regex.search(txt) if self.nth == 0 else \
nth(self._regex.finditer(txt), self.nth)
if not m:
msg = 'Unable to find %s %s in %r' % (ordinal(self.nth), self.pattern, txt)
return self.default_or_raise(RegexpError(msg))
if isinstance(m, Iterator):
return map(self.expand, m)
return self.expand(m)
class Map(Filter):
def __init__(self, selector, map_dict, default=_NO_DEFAULT):
super(Map, self).__init__(selector, default=default)
self.map_dict = map_dict
@debug()
def filter(self, txt):
try:
return self.map_dict[txt]
except KeyError:
return self.default_or_raise(ItemNotFound('Unable to handle %r on %r' % (txt, self.map_dict)))
class DateTime(Filter):
def __init__(self, selector=None, default=_NO_DEFAULT, dayfirst=False, translations=None):
super(DateTime, self).__init__(selector, default=default)
self.dayfirst = dayfirst
self.translations = translations
@debug()
def filter(self, txt):
if empty(txt) or txt == '':
return self.default_or_raise(ParseError('Unable to parse %r' % txt))
try:
if self.translations:
for search, repl in self.translations:
txt = search.sub(repl, txt)
return parse_date(txt, dayfirst=self.dayfirst)
except (ValueError, TypeError) as e:
return self.default_or_raise(ParseError('Unable to parse %r: %s' % (txt, e)))
class Date(DateTime):
def __init__(self, selector=None, default=_NO_DEFAULT, dayfirst=False, translations=None):
super(Date, self).__init__(selector, default=default, dayfirst=dayfirst, translations=translations)
@debug()
def filter(self, txt):
datetime = super(Date, self).filter(txt)
if hasattr(datetime, 'date'):
return datetime.date()
else:
return datetime
class DateGuesser(Filter):
def __init__(self, selector, date_guesser, **kwargs):
super(DateGuesser, self).__init__(selector)
self.date_guesser = date_guesser
self.kwargs = kwargs
def __call__(self, item):
values = self.select(self.selector, item, obj=self._obj, key=self._key)
date_guesser = self.date_guesser
        # In case Env() is used to give date_guesser.
if isinstance(date_guesser, _Filter):
date_guesser = self.select(date_guesser, item, obj=self._obj, key=self._key)
if isinstance(values, basestring):
values = re.split('[/-]', values)
if len(values) == 2:
day, month = map(int, values)
else:
raise ParseError('Unable to take (day, month) tuple from %r' % values)
return date_guesser.guess_date(day, month, **self.kwargs)
class Time(Filter):
klass = datetime.time
_regexp = re.compile(r'(?P<hh>\d+):?(?P<mm>\d+)(:(?P<ss>\d+))?')
kwargs = {'hour': 'hh', 'minute': 'mm', 'second': 'ss'}
def __init__(self, selector=None, default=_NO_DEFAULT):
super(Time, self).__init__(selector, default=default)
@debug()
def filter(self, txt):
m = self._regexp.search(txt)
if m:
kwargs = {}
for key, index in self.kwargs.iteritems():
kwargs[key] = int(m.groupdict()[index] or 0)
return self.klass(**kwargs)
return self.default_or_raise(ParseError('Unable to find time in %r' % txt))
class Duration(Time):
klass = datetime.timedelta
regexp = re.compile(r'((?P<hh>\d+)[:;])?(?P<mm>\d+)[;:](?P<ss>\d+)')
kwargs = {'hours': 'hh', 'minutes': 'mm', 'seconds': 'ss'}
class MultiFilter(Filter):
def __init__(self, *args, **kwargs):
default = kwargs.pop('default', _NO_DEFAULT)
super(MultiFilter, self).__init__(args, default)
def __call__(self, item):
values = [self.select(selector, item, obj=self._obj, key=self._key) for selector in self.selector]
return self.filter(tuple(values))
def filter(self, values):
raise NotImplementedError()
class CombineDate(MultiFilter):
def __init__(self, date, time):
super(CombineDate, self).__init__(date, time)
@debug()
def filter(self, values):
return datetime.datetime.combine(values[0], values[1])
class Format(MultiFilter):
def __init__(self, fmt, *args):
super(Format, self).__init__(*args)
self.fmt = fmt
@debug()
def filter(self, values):
return self.fmt % values
class BrowserURL(MultiFilter):
def __init__(self, url_name, **kwargs):
super(BrowserURL, self).__init__(*kwargs.values())
self.url_name = url_name
self.keys = kwargs.keys()
def __call__(self, item):
values = super(BrowserURL, self).__call__(item)
url = getattr(item.page.browser, self.url_name)
assert isinstance(url, URL), "%s.%s must be an URL object" % (type(item.page.browser).__name__, self.url_name)
return url.build(**dict(zip(self.keys, values)))
@debug()
def filter(self, values):
return values
class Join(Filter):
def __init__(self, pattern, selector=None, textCleaner=CleanText):
super(Join, self).__init__(selector)
self.pattern = pattern
self.textCleaner = textCleaner
@debug()
def filter(self, el):
res = u''
for li in el:
res += self.pattern % self.textCleaner.clean(li)
return res
def test_CleanText():
# This test works poorly under a doctest, or would be hard to read
assert CleanText().filter(u' coucou \n\théhé') == u'coucou héhé'
assert CleanText().filter('coucou\xa0coucou') == CleanText().filter(u'coucou\xa0coucou') == u'coucou coucou'
# Unicode normalization
assert CleanText().filter(u'Éçã') == u'Éçã'
assert CleanText(normalize='NFKC').filter(u'…') == u'...'
assert CleanText().filter(u'…') == u'…'
# Diacritical mark (dakuten)
assert CleanText().filter(u'\u3053\u3099') == u'\u3054'
assert CleanText(normalize='NFD').filter(u'\u3053\u3099') == u'\u3053\u3099'
assert CleanText(normalize='NFD').filter(u'\u3054') == u'\u3053\u3099'
assert CleanText(normalize=False).filter(u'\u3053\u3099') == u'\u3053\u3099'
| frankrousseau/weboob | weboob/browser/filters/standard.py | Python | agpl-3.0 | 23,209 |
"""Test jail_code.py"""
import os
import shutil
import sys
import textwrap
import tempfile
import unittest
from nose.plugins.skip import SkipTest
from codejail.jail_code import jail_code, is_configured, Jail, configure, auto_configure
auto_configure()
def jailpy(code=None, *args, **kwargs):
"""Run `jail_code` on Python."""
if code:
code = textwrap.dedent(code)
result = jail_code("python", code, *args, **kwargs)
if isinstance(result.stdout, bytes):
result.stdout = result.stdout.decode()
if isinstance(result.stderr, bytes):
result.stderr = result.stderr.decode()
return result
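# For instance (illustrative; requires a configured "python" jail, see below):
#   jailpy(code="print('hi')").stdout == 'hi\n'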
def file_here(fname):
"""Return the full path to a file alongside this code."""
return os.path.join(os.path.dirname(__file__), fname)
class JailCodeHelpers(unittest.TestCase):
"""Assert helpers for jail_code tests."""
def setUp(self):
super(JailCodeHelpers, self).setUp()
if not is_configured("python"):
raise SkipTest
def assertResultOk(self, res):
"""Assert that `res` exited well (0), and had no stderr output."""
self.assertEqual(res.stderr, "")
self.assertEqual(res.status, 0)
class TestFeatures(JailCodeHelpers):
"""Test features of how `jail_code` runs Python."""
def test_hello_world(self):
res = jailpy(code="print('Hello, world!')")
self.assertResultOk(res)
self.assertEqual(res.stdout, 'Hello, world!\n')
def test_argv(self):
res = jailpy(
code="import sys; print(':'.join(sys.argv[1:]))",
argv=["Hello", "world", "-x"]
)
self.assertResultOk(res)
self.assertEqual(res.stdout, "Hello:world:-x\n")
def test_ends_with_exception(self):
res = jailpy(code="""raise Exception('FAIL')""")
self.assertNotEqual(res.status, 0)
self.assertEqual(res.stdout, "")
self.assertEqual(res.stderr, textwrap.dedent("""\
Traceback (most recent call last):
File "jailed_code", line 1, in <module>
raise Exception('FAIL')
Exception: FAIL
"""))
def test_stdin_is_provided(self):
res = jailpy(
code="import json,sys; print(sum(json.load(sys.stdin)))",
stdin="[1, 2.5, 33]"
)
self.assertResultOk(res)
self.assertEqual(res.stdout, "36.5\n")
def test_files_are_copied(self):
res = jailpy(
code="print('Look:', open('hello.txt').read())",
files=[file_here("hello.txt")]
)
self.assertResultOk(res)
self.assertEqual(res.stdout, 'Look: Hello there.\n\n')
def test_directories_are_copied(self):
res = jailpy(
code="""\
import os
for path, dirs, files in os.walk("."):
print((path, sorted(dirs), sorted(files)))
""",
files=[file_here("hello.txt"), file_here("pylib")]
)
self.assertResultOk(res)
self.assertIn("hello.txt", res.stdout)
self.assertIn("pylib", res.stdout)
self.assertIn("module.py", res.stdout)
def test_executing_a_copied_file(self):
res = jailpy(
files=[file_here("doit.py")],
argv=["doit.py", "1", "2", "3"]
)
self.assertResultOk(res)
self.assertEqual(
res.stdout,
"This is doit.py!\nMy args are ['doit.py', '1', '2', '3']\n"
)
def test_context_managers(self):
first = textwrap.dedent("""
with open("hello.txt", "w") as f:
f.write("Hello, second")
""")
second = textwrap.dedent("""
with open("hello.txt") as f:
print(f.read())
""")
limits = {"TIME": 1, "MEMORY": 128*1024*1024,
"CAN_FORK": True, "FILE_SIZE": 256}
configure("unconfined_python", sys.prefix + "/bin/python", limits_conf=limits)
with Jail() as j:
res = j.run_code("unconfined_python", first)
self.assertEqual(res.status, 0)
res = j.run_code("python", second)
self.assertEqual(res.status, 0)
self.assertEqual(res.stdout.decode().strip(), "Hello, second")
class TestLimits(JailCodeHelpers):
"""Tests of the resource limits, and changing them."""
def test_cant_use_too_much_memory(self):
# This will fail after setting the limit to 30Mb.
res = jailpy(code="print(len(bytearray(50000000)))", limits={'MEMORY': 30000000})
self.assertEqual(res.stdout, "")
self.assertNotEqual(res.status, 0)
def test_changing_vmem_limit(self):
# Up the limit, it will succeed.
res = jailpy(code="print(len(bytearray(50000000)))", limits={'MEMORY': 80000000})
self.assertEqual(res.stdout, "50000000\n")
self.assertEqual(res.status, 0)
def test_disabling_vmem_limit(self):
# Disable the limit, it will succeed.
res = jailpy(code="print(len(bytearray(50000000)))", limits={'MEMORY': None})
self.assertEqual(res.stdout, "50000000\n")
self.assertEqual(res.status, 0)
def test_cant_use_too_much_cpu(self):
res = jailpy(code="print(sum(range(10**9)))")
self.assertEqual(res.stdout, "")
self.assertNotEqual(res.status, 0)
self.assertTrue(res.time_limit_exceeded)
def test_cant_use_too_much_time(self):
# time limit is 5 * cpu_time
res = jailpy(code="import time; time.sleep(7); print('Done!')", limits={'TIME': 1})
self.assertNotEqual(res.status, 0)
self.assertEqual(res.stdout, "")
self.assertTrue(res.time_limit_exceeded)
def test_cant_write_files(self):
res = jailpy(code="""\
print("Trying")
with open("mydata.txt", "w") as f:
f.write("hello")
with open("mydata.txt") as f2:
print("Got this:", f2.read())
""")
self.assertNotEqual(res.status, 0)
self.assertEqual(res.stdout, "Trying\n")
self.assertIn("ermission denied", res.stderr)
def test_cant_use_network(self):
res = jailpy(code="""\
import urllib.request
print("Reading google")
u = urllib.request.urlopen("http://google.com")
google = u.read()
print(len(google))
""")
self.assertNotEqual(res.status, 0)
self.assertEqual(res.stdout, "Reading google\n")
self.assertIn("URLError", res.stderr)
def test_cant_fork(self):
res = jailpy(code="""\
import os
print("Forking")
child_ppid = os.fork()
""")
self.assertNotEqual(res.status, 0)
self.assertEqual(res.stdout, "Forking\n")
self.assertIn("IOError", res.stderr)
def test_cant_see_environment_variables(self):
os.environ['HONEY_BOO_BOO'] = 'Look!'
res = jailpy(code="""\
import os
for name, value in os.environ.items():
print("%s: %r" % (name, value))
""")
self.assertResultOk(res)
self.assertNotIn("HONEY", res.stdout)
def test_reading_dev_random(self):
# We can read 10 bytes just fine.
res = jailpy(code="x = open('/dev/random', 'rb').read(10); print(len(x))")
self.assertResultOk(res)
self.assertEqual(res.stdout, "10\n")
# If we try to read all of it, we'll be killed by the real-time limit.
res = jailpy(code="x = open('/dev/random').read(); print('Done!')")
self.assertNotEqual(res.status, 0)
class TestSymlinks(JailCodeHelpers):
"""Testing symlink behavior."""
def setUp(self):
# Make a temp dir, and arrange to have it removed when done.
tmp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmp_dir)
# Make a directory that won't be copied into the sandbox.
self.not_copied = os.path.join(tmp_dir, "not_copied")
os.mkdir(self.not_copied)
self.linked_txt = os.path.join(self.not_copied, "linked.txt")
with open(self.linked_txt, "w") as linked:
linked.write("Hi!")
# Make a directory that will be copied into the sandbox, with a
# symlink to a file we aren't copying in.
self.copied = os.path.join(tmp_dir, "copied")
os.mkdir(self.copied)
self.here_txt = os.path.join(self.copied, "here.txt")
with open(self.here_txt, "w") as here:
here.write("012345")
self.link_txt = os.path.join(self.copied, "link.txt")
os.symlink(self.linked_txt, self.link_txt)
self.herelink_txt = os.path.join(self.copied, "herelink.txt")
os.symlink("here.txt", self.herelink_txt)
def test_symlinks_in_directories_wont_copy_data(self):
# Run some code in the sandbox, with a copied directory containing
# the symlink.
res = jailpy(
code="""\
print(open('copied/here.txt').read()) # can read
print(open('copied/herelink.txt').read()) # can read
print(open('copied/link.txt').read()) # can't read
""",
files=[self.copied],
)
self.assertEqual(res.stdout, "012345\n012345\n")
self.assertIn("ermission denied", res.stderr)
def test_symlinks_wont_copy_data(self):
# Run some code in the sandbox, with a copied file which is a symlink.
res = jailpy(
code="""\
print(open('here.txt').read()) # can read
print(open('herelink.txt').read()) # can read
print(open('link.txt').read()) # can't read
""",
files=[self.here_txt, self.herelink_txt, self.link_txt],
)
self.assertEqual(res.stdout, "012345\n012345\n")
self.assertIn("ermission denied", res.stderr)
class TestMalware(JailCodeHelpers):
"""Tests that attempt actual malware against the interpreter or system."""
def test_crash_cpython(self):
# http://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
res = jailpy(code="""\
import types, sys
bad_code = types.CodeType(0,0,0,0,0,b"KABOOM",(),(),(),"","",0,b"")
crash_me = types.FunctionType(bad_code, {})
print("Here we go...")
sys.stdout.flush()
crash_me()
print("The afterlife!")
""")
self.assertNotEqual(res.status, 0)
self.assertEqual(res.stderr, "")
self.assertEqual(res.stdout, "Here we go...\n")
def test_read_etc_passwd(self):
res = jailpy(code="""\
bytes = len(open('/etc/passwd').read())
print('Gotcha', bytes)
""")
self.assertNotEqual(res.status, 0)
self.assertEqual(res.stdout, "")
self.assertIn("ermission denied", res.stderr)
def test_find_other_sandboxes(self):
res = jailpy(code="""
import os
places = [
"..", "/tmp", "/", "/home", "/etc", "/var"
]
for place in places:
try:
files = os.listdir(place)
except Exception:
# darn
pass
else:
print("Files in %r: %r" % (place, files))
print("Done.")
""")
self.assertResultOk(res)
self.assertEqual(res.stdout, "Done.\n")
| StepicOrg/codejail | codejail/tests/test_jail_code.py | Python | agpl-3.0 | 11,655 |
# -*- coding: utf-8 -*-
import inspect
class Iterable:
# Jinja2 wants to iterate over all properties; __dict__ doesn't return the @property ones
def __iter__(self):
for attr, value in inspect.getmembers(self):
if not attr.startswith('_'):
yield attr, value
| pypt/shaibos | shaibos/util/iterable.py | Python | lgpl-2.1 | 304 |
#!/usr/bin/env python
import os
import codecs
from xml.dom import minidom
import subprocess
import sys
INKSCAPE = '/usr/bin/inkscape'
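# Usage sketch (the Inkscape path above is an assumption about the host):
#   ./inkscape-export.py --stay Background drawing.svg
# exports each remaining layer of drawing.svg as <layer>.png next to the file,
# keeping the "Background" layer visible in every export.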
def list_layers(svg):
layers = [ ]
for g in svg.getElementsByTagName("g"):
if g.attributes.has_key("inkscape:label"):
layers.append(g.attributes["inkscape:label"].value)
return layers
def export_layer(svg, directory, layer, stay):
if layer in stay:
return
print layer, "..."
for g in svg.getElementsByTagName("g"):
if g.attributes.has_key("inkscape:label"):
label = g.attributes["inkscape:label"].value
if label == layer or label in stay:
g.attributes['style'] = 'display:inline'
else:
g.attributes['style'] = 'display:none'
dest = os.path.join(directory, layer + ".svg")
codecs.open(dest, "w", encoding="utf8").write(svg.toxml())
png = os.path.join(directory, layer + ".png")
subprocess.check_call([INKSCAPE, "--export-png", png, dest])
os.unlink(dest)
def main():
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument('--stay', action='append', default=[], help='layer to always have visible')
parser.add_argument('src', help='source SVG file.')
args = parser.parse_args()
svg = minidom.parse(open(args.src))
for layer in list_layers(svg):
export_layer(svg, os.path.dirname(args.src), layer, args.stay)
if __name__ == '__main__':
main()
| stefwalter/cockpituous | tests/doc/inkscape-export.py | Python | lgpl-2.1 | 1,516 |
#!/usr/bin/env python
import sys
sys.path.insert(0, "../..")
DEBUG = 0
if len(sys.argv) > 1:
DEBUG = 1
from Kiwi.initgtk import gtk
from Kiwi.Proxies import Proxy
from Kiwi.Models import Model
class Foo(Model):
A = 1
B = 0
class CheckProxy(Proxy):
widgets = [":B", ":A"]
def __init__(self, model):
self._build()
Proxy.__init__(self, model, delete_handler=gtk.mainquit)
gtk.idle_add(self.focus_topmost)
def _build(self):
self.win = gtk.Window()
self.A = gtk.CheckButton("This is A")
self.B = gtk.CheckButton("This is B")
vbox = gtk.VBox()
vbox.add(self.A)
vbox.add(self.B)
self.win.add(vbox)
class ToggleProxy(CheckProxy):
def _build(self):
self.win = gtk.Window()
self.A = gtk.ToggleButton("This is A")
self.B = gtk.ToggleButton("This is B")
vbox = gtk.VBox()
vbox.add(self.A)
vbox.add(self.B)
self.win.add(vbox)
f = Foo()
c = CheckProxy(f)
assert f.A == 1
assert f.B == 0
c.A.clicked()
c.B.clicked()
assert f.A == 0, f.A
assert f.B == 1, f.B
if DEBUG: c.show_all_and_loop() ; print f.__dict__
print "CheckButton OK"
f = Foo()
c = ToggleProxy(f)
assert f.A == 1, f.A
assert f.B == 0, f.B
c.A.clicked()
c.B.clicked()
assert f.A == 0, f.A
assert f.B == 1, f.B
if DEBUG: c.show_all_and_loop(); print f.__dict__
print "ToggleButton OK"
| Schevo/kiwi | tests/Proxies/CheckButton.py | Python | lgpl-2.1 | 1,404 |
#!/usr/bin/env python
import os
import sys
try:
import py2deb
except ImportError:
import fake_py2deb as py2deb
import constants
__app_name__ = constants.__app_name__
__description__ = """Very simple Audiobook player.
Supports playing, pausing, seeking (sort of) and saving state when changing book/closing.
Plays books arranged as dirs under myDocs/Audiobooks
.
Homepage: http://wiki.maemo.org/Nqaap"""
__author__ = "Soeren 'Pengman' Pedersen"
__email__ = "pengmeister@gmail.com"
__version__ = constants.__version__
__build__ = constants.__build__
__changelog__ = """
* More unicode improvements
""".strip()
__postinstall__ = """#!/bin/sh -e
gtk-update-icon-cache -f /usr/share/icons/hicolor
rm -f ~/.%(name)s/%(name)s.log
""" % {"name": constants.__app_name__}
def find_files(prefix, path):
for root, dirs, files in os.walk(path):
for file in files:
if file.startswith(prefix+"-"):
fileParts = file.split("-")
unused, relPathParts, newName = fileParts[0], fileParts[1:-1], fileParts[-1]
assert unused == prefix
relPath = os.sep.join(relPathParts)
yield relPath, file, newName
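# For example (hypothetical name): a file "src-gui-widgets-window.py" found
# under `path` yields ("gui/widgets", "src-gui-widgets-window.py", "window.py").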
def unflatten_files(files):
d = {}
for relPath, oldName, newName in files:
if relPath not in d:
d[relPath] = []
d[relPath].append((oldName, newName))
return d
def build_package(distribution):
try:
os.chdir(os.path.dirname(sys.argv[0]))
except:
pass
py2deb.Py2deb.SECTIONS = py2deb.SECTIONS_BY_POLICY[distribution]
p = py2deb.Py2deb(__app_name__)
p.prettyName = constants.__pretty_app_name__
p.description = __description__
p.bugTracker="https://bugs.maemo.org/enter_bug.cgi?product=nQa%%20Audiobook%%20Player"
p.author = __author__
p.mail = __email__
p.license = "lgpl"
p.depends = ", ".join([
"python2.6 | python2.5",
"python-gtk2 | python2.5-gtk2",
"python-dbus | python2.5-dbus",
"python-telepathy | python2.5-telepathy",
"python-gobject | python2.5-gobject",
"python-simplejson",
])
maemoSpecificDepends = ", python-osso | python2.5-osso, python-hildon | python2.5-hildon"
p.depends += {
"debian": ", python-gst0.10",
"diablo": maemoSpecificDepends,
"fremantle": maemoSpecificDepends + ", python-gst0.10",
}[distribution]
p.section = {
"debian": "sound",
"diablo": "user/multimedia",
"fremantle": "user/multimedia",
}[distribution]
p.arch="all"
p.urgency="low"
p.distribution=distribution
p.repository="extras"
p.changelog = __changelog__
p.postinstall = __postinstall__
p.icon = {
"debian": "26x26-%s.png" % constants.__app_name__,
"diablo": "26x26-%s.png" % constants.__app_name__,
"fremantle": "48x48-%s.png" % constants.__app_name__,
}[distribution]
p["/opt/%s/bin" % constants.__app_name__] = [ "%s.py" % constants.__app_name__ ]
for relPath, files in unflatten_files(find_files("src", ".")).iteritems():
fullPath = "/opt/%s/lib" % constants.__app_name__
if relPath:
fullPath += os.sep+relPath
p[fullPath] = list(
"|".join((oldName, newName))
for (oldName, newName) in files
)
p["/usr/share/applications/hildon"] = ["%s.desktop" % constants.__app_name__]
p["/usr/share/icons/hicolor/26x26/hildon"] = ["26x26-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/48x48/hildon"] = ["48x48-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/64x64/hildon"] = ["64x64-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
p["/usr/share/icons/hicolor/scalable/hildon"] = ["scale-%s.png|%s.png" % (constants.__app_name__, constants.__app_name__)]
print p
if distribution == "debian":
print p.generate(
version="%s-%s" % (__version__, __build__),
changelog=__changelog__,
build=True,
tar=False,
changes=False,
dsc=False,
)
else:
print p.generate(
version="%s-%s" % (__version__, __build__),
changelog=__changelog__,
build=False,
tar=True,
changes=True,
dsc=True,
)
print "Building for %s finished" % distribution
if __name__ == "__main__":
if len(sys.argv) == 1:
distribution = "fremantle"
else:
distribution = sys.argv[1]
build_package(distribution)
| epage/nqaap | support/builddeb.py | Python | lgpl-2.1 | 4,263 |
# Copyright (c) 2008-2010, Regents of the University of Colorado.
# This work was supported by NASA contracts NNJ05HE10G, NNC06CB40C, and
# NNC07CB47C.
# This library is free software. You can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, version 2.1 of the License.
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details. A copy of the GNU
# Lesser General Public License v 2.1 can be found in the file named
# "COPYING.LESSER". You should have received a copy of the GNU Lesser
# General Public License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301 USA.
# You may contact the Automation Group at:
# bionet@bioserve.colorado.edu
# Dr. Kevin Gifford
# University of Colorado
# Engineering Center, ECAE 1B08
# Boulder, CO 80309
# Because BioNet was developed at a university, we ask that you provide
# attribution to the BioNet authors in any redistribution, modification,
# work, or article based on this library.
# You may contribute modifications or suggestions to the University of
# Colorado for the purpose of discussing and improving this software.
# Before your modifications are incorporated into the master version
# distributed by the University of Colorado, we must have a contributor
# license agreement on file from each contributor. If you wish to supply
# the University with your modifications, please join our mailing list.
# Instructions can be found on our website at
# http://bioserve.colorado.edu/developers-corner.
from bdm_client import *
from timespan import timeval_to_int
import time
sessions = {}
bionet_resources = {}
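# Shape of the module-level caches, inferred from cb_datapoint() below (sketch):
#   sessions[session_id] = {'resource': [<resource name patterns>],
#                           'bionet-resources': [<matched resource names>],
#                           'last requested': <unix timestamp>}
#   bionet_resources[name] = {'datapoints': [(timestamp, value_str), ...],
#                             'sessions': {session_id: [(timestamp, value_str), ...]}}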
#callbacks
def cb_lost_bdm(bdm, user_data):
None
def cb_new_bdm(bdm, user_data):
None
def cb_lost_hab(hab, user_data):
for i in range(0, bionet_hab_num_nodes(hab)):
node = bionet_hab_get_node_by_index(hab, i)
for j in range(0, bionet_node_get_num_resources(node)):
resource = bionet_node_get_resource_by_index(node, j)
pybionet_set_user_data(resource, None)
print("lost hab: " + bionet_hab_get_type(hab) + "." + bionet_hab_get_id(hab))
def cb_new_hab(hab, user_data):
print("new hab: " + bionet_hab_get_type(hab) + "." + bionet_hab_get_id(hab))
def cb_new_node(node, user_data):
hab = bionet_node_get_hab(node)
print("new node: " + bionet_node_get_name(node))
if (bionet_node_get_num_resources(node)):
print(" Resources:")
for i in range(bionet_node_get_num_resources(node)):
resource = bionet_node_get_resource_by_index(node, i)
datapoint = bionet_resource_get_datapoint_by_index(resource, 0)
if (datapoint == None):
print(" " + bionet_resource_data_type_to_string(bionet_resource_get_data_type(resource)) + " " + bionet_resource_flavor_to_string(bionet_resource_get_flavor(resource)) + " " + bionet_resource_get_id(resource) + ": (no known value)")
else:
value_str = bionet_value_to_str(bionet_datapoint_get_value(datapoint));
#%s %s %s = %s @ %s
print(" " + bionet_resource_data_type_to_string(bionet_resource_get_data_type(resource)) + " " + bionet_resource_flavor_to_string(bionet_resource_get_flavor(resource)) + " " + bionet_resource_get_id(resource) + " = " + value_str + " @ " + bionet_datapoint_timestamp_to_string(datapoint))
if (bionet_node_get_num_streams(node)):
print(" Streams:")
for i in range(bionet_node_get_num_streams(node)):
stream = bionet_node_get_stream_by_index(node, i)
print(" " + bionet_stream_get_id(stream) + " " + bionet_stream_get_type(stream) + " " + bionet_stream_direction_to_string(bionet_stream_get_direction(stream)))
def cb_lost_node(node, userdata):
hab = bionet_node_get_hab(node)
for j in range(0, bionet_node_get_num_resources(node)):
resource = bionet_node_get_resource_by_index(node, j)
pybionet_set_user_data(resource, None)
print("lost node: " + bionet_node_get_name(node))
def cb_datapoint(datapoint, userdata):
value = bionet_datapoint_get_value(datapoint);
resource = bionet_value_get_resource(value);
node = bionet_resource_get_node(resource);
hab = bionet_node_get_hab(node);
value_str = bionet_value_to_str(value);
#"%s.%s.%s:%s = %s %s %s @ %s"
#print(bionet_resource_get_name(resource) + " = " + bionet_resource_data_type_to_string(bionet_resource_get_data_type(resource)) + " " + bionet_resource_flavor_to_string(bionet_resource_get_flavor(resource)) + " " + value_str + " @ " + bionet_datapoint_timestamp_to_string(datapoint))
now = time.time()
removal = []
resource_name = bionet_resource_get_name(resource)
dp = (timeval_to_int(bionet_datapoint_get_timestamp(datapoint)), value_str)
for session_id, session in sessions.iteritems():
found = False
for r in session['resource']:
if (bionet_resource_name_matches(resource_name, r)):
for name in session['bionet-resources']:
if (name == resource_name):
u = bionet_resources[name]
if (None == u) or ('datapoints' not in u) or ('sessions' not in u): # no user data is set yet
u = { 'datapoints' : [ dp ], 'sessions' : { session_id : [ dp ] } }
bionet_resources[name] = u
print "Added datapoint to new user data"
else: # user data is set, just append to it
u['datapoints'].append(dp)
if session_id in u['sessions']:
u['sessions'][session_id].append(dp)
else:
u['sessions'][session_id] = [ dp ]
#print "Added datapoint to existing user data"
found = True
if (False == found):
session['bionet-resources'].append(resource_name)
u = { 'datapoints' : [ dp ], 'sessions' : { session_id : [ dp ] } }
bionet_resources[resource_name] = u
#print "Added datapoint to new user data of new resource"
if (now > (session['last requested'] + 600)):
# this session hasn't been requested in more than 10 minutes. remove it
removal.append(session_id)
for session_id in removal:
#print "removed subscription ", sessions[session_id]['resource']
del sessions[session_id]
# TODO: unsubscribe when bdm_unsubscribe() is implemented
| ldm5180/hammerhead | data-manager/client/bdmplot2/bdmplot2_callback.py | Python | lgpl-2.1 | 7,139 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""\
Test that Spack's shebang filtering works correctly.
"""
import os
import stat
import pytest
import tempfile
import shutil
import filecmp
from llnl.util.filesystem import mkdirp
import spack
from spack.hooks.sbang import shebang_too_long, filter_shebangs_in_directory
from spack.util.executable import which
short_line = "#!/this/is/short/bin/bash\n"
long_line = "#!/this/" + ('x' * 200) + "/is/long\n"
lua_line = "#!/this/" + ('x' * 200) + "/is/lua\n"
lua_in_text = ("line\n") * 100 + "lua\n" + ("line\n" * 100)
lua_line_patched = "--!/this/" + ('x' * 200) + "/is/lua\n"
node_line = "#!/this/" + ('x' * 200) + "/is/node\n"
node_in_text = ("line\n") * 100 + "node\n" + ("line\n" * 100)
node_line_patched = "//!/this/" + ('x' * 200) + "/is/node\n"
sbang_line = '#!/bin/bash %s/bin/sbang\n' % spack.spack_root
last_line = "last!\n"
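# Patching model exercised below (a sketch): a script whose shebang exceeds the
# kernel limit gets the sbang line prepended, e.g.
#   #!/bin/bash <spack_root>/bin/sbang
#   #!/this/xxx.../is/long
# and lua/node scripts additionally have the long shebang re-commented as
# '--!' or '//!' so their own interpreter ignores it.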
class ScriptDirectory(object):
"""Directory full of test scripts to run sbang instrumentation on."""
def __init__(self):
self.tempdir = tempfile.mkdtemp()
self.directory = os.path.join(self.tempdir, 'dir')
mkdirp(self.directory)
# Script with short shebang
self.short_shebang = os.path.join(self.tempdir, 'short')
with open(self.short_shebang, 'w') as f:
f.write(short_line)
f.write(last_line)
# Script with long shebang
self.long_shebang = os.path.join(self.tempdir, 'long')
with open(self.long_shebang, 'w') as f:
f.write(long_line)
f.write(last_line)
# Lua script with long shebang
self.lua_shebang = os.path.join(self.tempdir, 'lua')
with open(self.lua_shebang, 'w') as f:
f.write(lua_line)
f.write(last_line)
        # Script with 'lua' in the body text, but a short shebang
self.lua_textbang = os.path.join(self.tempdir, 'lua_in_text')
with open(self.lua_textbang, 'w') as f:
f.write(short_line)
f.write(lua_in_text)
f.write(last_line)
# Node script with long shebang
self.node_shebang = os.path.join(self.tempdir, 'node')
with open(self.node_shebang, 'w') as f:
f.write(node_line)
f.write(last_line)
        # Script with 'node' in the body text, but a short shebang
self.node_textbang = os.path.join(self.tempdir, 'node_in_text')
with open(self.node_textbang, 'w') as f:
f.write(short_line)
f.write(node_in_text)
f.write(last_line)
# Script already using sbang.
self.has_sbang = os.path.join(self.tempdir, 'shebang')
with open(self.has_sbang, 'w') as f:
f.write(sbang_line)
f.write(long_line)
f.write(last_line)
# Fake binary file.
self.binary = os.path.join(self.tempdir, 'binary')
tar = which('tar', required=True)
tar('czf', self.binary, self.has_sbang)
def destroy(self):
shutil.rmtree(self.tempdir, ignore_errors=True)
@pytest.fixture
def script_dir():
sdir = ScriptDirectory()
yield sdir
sdir.destroy()
def test_shebang_handling(script_dir):
assert shebang_too_long(script_dir.lua_shebang)
assert shebang_too_long(script_dir.long_shebang)
assert not shebang_too_long(script_dir.short_shebang)
assert not shebang_too_long(script_dir.has_sbang)
assert not shebang_too_long(script_dir.binary)
assert not shebang_too_long(script_dir.directory)
filter_shebangs_in_directory(script_dir.tempdir)
# Make sure this is untouched
with open(script_dir.short_shebang, 'r') as f:
assert f.readline() == short_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.long_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.lua_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == lua_line_patched
assert f.readline() == last_line
# Make sure this got patched.
with open(script_dir.node_shebang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == node_line_patched
assert f.readline() == last_line
assert filecmp.cmp(script_dir.lua_textbang,
os.path.join(script_dir.tempdir, 'lua_in_text'))
assert filecmp.cmp(script_dir.node_textbang,
os.path.join(script_dir.tempdir, 'node_in_text'))
# Make sure this is untouched
with open(script_dir.has_sbang, 'r') as f:
assert f.readline() == sbang_line
assert f.readline() == long_line
assert f.readline() == last_line
def test_shebang_handles_non_writable_files(script_dir):
# make a file non-writable
st = os.stat(script_dir.long_shebang)
not_writable_mode = st.st_mode & ~stat.S_IWRITE
os.chmod(script_dir.long_shebang, not_writable_mode)
test_shebang_handling(script_dir)
st = os.stat(script_dir.long_shebang)
assert oct(not_writable_mode) == oct(st.st_mode)
| skosukhin/spack | lib/spack/spack/test/sbang.py | Python | lgpl-2.1 | 6,406 |
from collections import OrderedDict, defaultdict, deque
from typing import List, Dict, Deque
from acbs.find import find_package
from acbs.parser import ACBSPackageInfo, check_buildability
# package information cache
pool: Dict[str, ACBSPackageInfo] = {}
def tarjan_search(packages: 'OrderedDict[str, ACBSPackageInfo]', search_path: str) -> List[List[ACBSPackageInfo]]:
"""This function describes a Tarjan's strongly connected components algorithm.
The resulting list of ACBSPackageInfo are sorted topologically as a byproduct of the algorithm
"""
# Initialize state trackers
lowlink: Dict[str, int] = defaultdict(lambda: -1)
index: Dict[str, int] = defaultdict(lambda: -1)
stackstate: Dict[str, bool] = defaultdict(bool)
stack: Deque[str] = deque()
results: List[List[ACBSPackageInfo]] = []
packages_list: List[str] = [i for i in packages]
pool.update(packages)
for i in packages_list:
if index[i] == -1: # recurse on each package that is not yet visited
strongly_connected(search_path, packages_list, results, packages,
i, lowlink, index, stackstate, stack)
return results
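# Hypothetical usage sketch (build_group is illustrative, not part of acbs):
#   for group in tarjan_search(packages, search_path):
#       # a group with more than one member is a strongly connected component,
#       # i.e. a dependency loop that must be handled together; otherwise the
#       # groups arrive in topological build order.
#       build_group(group)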
def prepare_for_reorder(package: ACBSPackageInfo, packages_list: List[str]) -> ACBSPackageInfo:
"""This function prepares the package for reordering.
    The idea is to move installable dependencies that are themselves in the build list into the source-build dependency list (deps), leaving only the rest as installables.
"""
new_installables = []
for d in package.installables:
# skip self-dependency
if d == package.name:
new_installables.append(d)
continue
try:
packages_list.index(d)
package.deps.append(d)
except ValueError:
new_installables.append(d)
package.installables = new_installables
return package
def strongly_connected(search_path: str, packages_list: List[str], results: list, packages: 'OrderedDict[str, ACBSPackageInfo]', vert: str, lowlink: Dict[str, int], index: Dict[str, int], stackstate: Dict[str, bool], stack: Deque[str], depth=0):
# update depth indices
index[vert] = depth
lowlink[vert] = depth
depth += 1
stackstate[vert] = True
stack.append(vert)
# search package begin
print(f'[{len(results) + 1}/{len(pool)}] {vert}\t\t\r', end='', flush=True)
current_package = packages.get(vert)
if current_package is None:
package = pool.get(vert) or find_package(vert, search_path)
if not package:
raise ValueError(
f'Package {vert} not found')
if isinstance(package, list):
for s in package:
if vert == s.name:
current_package = s
pool[s.name] = s
continue
pool[s.name] = s
packages_list.append(s.name)
else:
current_package = package
pool[vert] = current_package
assert current_package is not None
# first check if this dependency is buildable
# when `required_by` argument is present, it will raise an exception when the dependency is unbuildable.
check_buildability(current_package, stack[-2] if len(stack) > 1 else '<unknown>')
# search package end
# Look for adjacent packages (dependencies)
for p in current_package.deps:
if index[p] == -1:
# recurse on unvisited packages
strongly_connected(search_path, packages_list, results, packages,
p, lowlink, index, stackstate, stack, depth)
lowlink[vert] = min(lowlink[p], lowlink[vert])
# adjacent package is in the stack which means it is part of a loop
elif stackstate[p] is True:
lowlink[vert] = min(lowlink[p], index[vert])
w = ''
result = []
# if this is a root vertex
if lowlink[vert] == index[vert]:
# the current stack contains the vertices that belong to the same loop
# if the stack only contains one vertex, then there is no loop there
while w != vert:
w = stack.pop()
result.append(pool[w])
stackstate[w] = False
results.append(result)
| AOSC-Dev/acbs | acbs/deps.py | Python | lgpl-2.1 | 4,219 |
import random
from firedrake import *
from firedrake_adjoint import *
import sys
mesh = UnitSquareMesh(4, 4)
V3 = FunctionSpace(mesh, "CG", 3)
V2 = FunctionSpace(mesh, "CG", 2)
firedrake.parameters["adjoint"]["record_all"] = True
def main(ic, annotate=False):
soln = project(ic, V2, annotate=annotate)
return soln
if __name__ == "__main__":
ic = project(Expression("x[0]*(x[0]-1)*x[1]*(x[1]-1)"), V3)
soln = main(ic, annotate=True)
adj_html("projection_forward.html", "forward")
assert replay_dolfin(tol=1e-12, stop=True)
J = Functional(soln*soln*dx*dt[FINISH_TIME])
Jic = assemble(soln*soln*dx)
dJdic = compute_gradient(J, InitialConditionParameter(ic), forget=False)
def J(ic):
soln = main(ic, annotate=False)
return assemble(soln*soln*dx)
minconv = taylor_test(J, InitialConditionParameter(ic), Jic, dJdic)
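    # a correct gradient makes the Taylor remainder converge at order ~2;
    # the 1.9 threshold below leaves a little numerical slack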
if minconv < 1.9:
sys.exit(1)
| ellipsis14/dolfin-adjoint | tests_firedrake/projection/projection.py | Python | lgpl-3.0 | 886 |
# Copyright (C) 2015 Swift Navigation Inc.
# Contact: Mark Fine <mark@swiftnav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
from .base_driver import BaseDriver
import serial
import serial.tools.list_ports
class PySerialDriver(BaseDriver):
"""
PySerialDriver
The :class:`PySerialDriver` class reads SBP messages from a serial port
    using the pyserial driver. This is mostly redundant, as the Serial object's
    read and write methods can be used directly.
Parameters
----------
port : string
URI to port to read SBP messages from. Accepts the following types
of URLs:
- rfc2217://<host>:<port>[/<option>[/<option>]]
- socket://<host>:<port>[/<option>[/<option>]]
- loop://[<option>[/<option>]]
and device names, such as /dev/ttyUSB0 (Linux) and COM3 (Windows). See
http://pyserial.sourceforge.net/pyserial_api.html#urls for more details.
baud : int
Baud rate of serial port (defaults to 115200)
"""
def __init__(self, port, baud=115200):
import serial
try:
handle = serial.serial_for_url(port)
handle.baudrate = baud
handle.timeout = 1
super(PySerialDriver, self).__init__(handle)
except (OSError, serial.SerialException) as e:
print
print "Error opening serial device '%s':" % port
print e
print
print "The following serial devices were detected:"
print
for (name, desc, _) in serial.tools.list_ports.comports():
if desc[0:4] == "ttyS":
continue
if name == desc:
print "\t%s" % name
else:
print "\t%s (%s)" % (name, desc)
print
raise SystemExit
def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read.
"""
try:
return self.handle.read(size)
except (OSError, serial.SerialException):
print
print "Piksi disconnected"
print
raise IOError
def write(self, s):
"""
Write wrapper.
Parameters
----------
s : bytes
Bytes to write
"""
try:
return self.handle.write(s)
except (OSError, serial.SerialException):
print
print "Piksi disconnected"
print
raise IOError
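
# Illustrative usage (the device path and baud rate are assumptions):
#
#   driver = PySerialDriver('/dev/ttyUSB0', baud=1000000)
#   data = driver.read(64)       # up to 64 bytes, 1 s timeout per the ctor
#   driver.write(data)
#   driver.handle.close()        # handle is the underlying Serial object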
| swift-nav/libsbp | python/sbp/client/drivers/pyserial_driver.py | Python | lgpl-3.0 | 2,580 |
import pandas as pd
import numpy as np
from swiftnav.ephemeris import *
from swiftnav.single_diff import SingleDiff
from swiftnav.gpstime import *
def construct_pyobj_eph(eph):
return Ephemeris(
eph.tgd,
eph.crs, eph.crc, eph.cuc, eph.cus, eph.cic, eph.cis,
eph.dn, eph.m0, eph.ecc, eph.sqrta, eph.omega0, eph.omegadot, eph.w, eph.inc, eph.inc_dot,
eph.af0, eph.af1, eph.af2,
GpsTime(eph.toe_wn, eph.toe_tow), GpsTime(eph.toc_wn, eph.toc_tow),
        eph['valid'], # this syntax is needed because the method .valid takes precedence over the field
eph.healthy,
        eph.prn+1) # +1 temporarily, until I get the next dataset where this is fixed
def separate_ephs(ephs):
"""
Return a dictionary of prn to dataframe, where each dataframe is
the unique ephemerides (unique and first, as in fst . groupby) over
the time period the data was taken.
"""
sep_ephs_tuples = [(int(prn),ephs[ephs['prn'] == prn]) for prn in ephs['prn'].unique()]
sep_ephs = {}
for sep_eph_tuple in sep_ephs_tuples:
        prn = sep_eph_tuple[0]+1  # temporarily, just for the dataset before I started storing them correctly TODO FIX
frame = pd.DataFrame(sep_eph_tuple[1].drop_duplicates().apply(construct_pyobj_eph, axis=1), columns=['ephemeris'])
# frame = pd.DataFrame(sep_eph_tuple[1].apply(construct_pyobj_eph, axis=1), columns=['ephemeris'])
frame['time'] = frame.index
sep_ephs[prn] = frame
return sep_ephs
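
# Illustrative: the same "unique and first" split on a toy frame (column
# names other than 'prn' are hypothetical):
#
#   df = pd.DataFrame({'prn': [0, 0, 1], 'af0': [1e-5, 1e-5, 2e-5]})
#   {int(p) + 1: df[df['prn'] == p].drop_duplicates() for p in df['prn'].unique()}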
def merge_into_sdiffs(ephs, sd):
"""
Taking ephemerides and observation data, this will merge them
together into a panel whose index is a sat, major axis is time,
and minor axis is everything needed for an sdiff struct.
    It's super slow, so I left it all in pandas format; that way we can
    save it out in HDF5 and get it back all nicely processed.
"""
sep_ephs = separate_ephs(ephs)
sats = sd.items
num_sats = map(lambda x: int(x[1:]),sats)
sdiff_dict = {}
for sat in sats:
# sat = sats[0]
sat_ephs = sep_ephs[int(sat[1:])]
fst_eph = sat_ephs.ix[0].ephemeris
obs = sd[sat]
obs['time'] = obs.index
def make_single_diff(x):
if np.isnan(x.C1) or np.isnan(x.L1) or np.isnan(x.S1_1) or np.isnan(x.S1_2):
return pd.Series([np.nan]*11,
index=['C1', 'L1', 'D1', 'sat_pos_x', 'sat_pos_y', 'sat_pos_z',
'sat_vel_x', 'sat_vel_y', 'sat_vel_z', 'min_snr', 'prn'])
c1 = x.C1
l1 = x.L1
snr = min(x.S1_1, x.S1_2)
timestamp = x.time
earlier_ephs = sat_ephs[sat_ephs['time'] <= timestamp]
if earlier_ephs.shape[0] >= 1:
eph = earlier_ephs.ix[-1].ephemeris
else:
eph = fst_eph
gpstime = datetime2gpst(timestamp)
pos, vel, clock_err, clock_rate_err = calc_sat_pos(eph, gpstime)
return pd.Series([c1, l1, np.nan, pos[0], pos[1], pos[2], vel[0], vel[1], vel[2], snr, int(sat[1:])],
index=['C1', 'L1', 'D1', 'sat_pos_x', 'sat_pos_y', 'sat_pos_z',
'sat_vel_x', 'sat_vel_y', 'sat_vel_z', 'min_snr', 'prn'])
sdiffs = obs.apply(make_single_diff,axis=1).dropna(how='all',axis=0)
sdiff_dict[sat] = sdiffs
return pd.Panel(sdiff_dict)
def main():
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("ephemeris",
help="the ephemeris file to process")
parser.add_argument("input",
help="the HDF5 file to process")
parser.add_argument("base_name", default=False,
help="the marker name of the base station")
parser.add_argument("rover_name", default=False,
help="the marker name of the rover")
args = parser.parse_args()
eph_file = pd.HDFStore(args.ephemeris)
eph = eph_file['eph']
h5 = pd.HDFStore(args.input)
sd_table = h5['sd_%s_%s' % (args.rover_name, args.base_name)]
output_table_name = 'sdiff_%s_%s' % (args.rover_name, args.base_name)
h5[output_table_name] = merge_into_sdiffs(eph, sd_table)
h5.close()
if __name__ == '__main__':
main()
| imh/gnss-analysis | gnss_analysis/mk_sdiffs.py | Python | lgpl-3.0 | 4,406 |
#!/usr/bin/python
import subprocess
code_dir = "."
subprocess.call([code_dir+"/release/simpleReact_moving_interface.out","10000","0.1"])
subprocess.call(["gnuplot",code_dir+"/scripts/simpleReact_moving_interface.gnu"])
subprocess.call(["cp",code_dir+"/simpleReact_moving_interface.eps",code_dir+"/plots/"])
| martinjrobins/RD_3D | scripts/simpleReact_moving_interface.py | Python | lgpl-3.0 | 311 |
import inspect
import traceback
import warnings
import functools
from shyft.api._api import *
import numpy as np
from math import sqrt
def deprecated(message: str = ''):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
    the first time the function is used, provided the warnings filter is set to show DeprecationWarning.
"""
def decorator_wrapper(func):
@functools.wraps(func)
def function_wrapper(*args, **kwargs):
current_call_source = '|'.join(traceback.format_stack(inspect.currentframe()))
if current_call_source not in function_wrapper.last_call_source:
warnings.warn("Class.method {} is now deprecated! {}".format(func, message),
category=DeprecationWarning, stacklevel=2)
function_wrapper.last_call_source.add(current_call_source)
return func(*args, **kwargs)
function_wrapper.last_call_source = set()
return function_wrapper
return decorator_wrapper
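
# Illustrative usage of the decorator above:
#
#   @deprecated("use new_solver() instead")
#   def old_solver(x):
#       return x * 2
#
#   old_solver(1)  # warns once per distinct call site, then stays quiet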
# Fix up vector types
DoubleVector.size = lambda self: len(self)
DoubleVector_FromNdArray = lambda x: DoubleVector.from_numpy(x)
def VectorString(v):
return str(v.to_numpy())
DoubleVector.__str__ = lambda self: VectorString(self)
Calendar.__str__ = lambda self: "Calendar('{0}')".format(self.tz_info.name())
def ShowUtcTime(v):
    utc = Calendar()
    return "[" + ",".join([utc.to_string(t) for t in v]) + "]"
UtcTimeVector.size = lambda self: len(self)
UtcTimeVector.__str__ = lambda self: ShowUtcTime(self)
IntVector.size = lambda self: len(self)
IntVector.__str__ = lambda self: VectorString(self)
StringVector.size = lambda self: len(self)
#ByteVector to/from string
ByteVector.__str__ = lambda self: byte_vector_to_hex_str(self)
ByteVector.from_str = byte_vector_from_hex_str
# fix up BW and pythonic syntax for TsVector
TsVector.size = lambda self: len(self)
TsVector.push_back = lambda self, ts: self.append(ts)
# and this is for bw.compat
def percentiles(tsv:TsVector,time_axis:TimeAxis,percentile_list:IntVector)->TsVector:
return tsv.percentiles(time_axis,percentile_list)
TargetSpecificationVector.size = lambda self: len(self)
# fix bw. stl name
UtcTimeVector.push_back = lambda self, x: self.append(x)
IntVector.push_back = lambda self, x: self.append(x)
DoubleVector.push_back = lambda self, x: self.append(x)
StringVector.push_back = lambda self, x: self.append(x)
# FIx up YMDhms
YMDhms.__str__ = lambda self: "YMDhms({0},{1},{2},{3},{4},{5})".format(self.year, self.month, self.day, self.hour,
self.minute, self.second)
YMDhms.__repr__ = lambda self: "{0}({1},{2},{3},{4},{5},{6})".format(self.__class__.__name__,
self.year, self.month, self.day, self.hour,
self.minute, self.second)
YWdhms.__str__ = lambda self: "YWdhms({0},{1},{2},{3},{4},{5})".format(self.iso_year, self.iso_week, self.week_day, self.hour,
self.minute, self.second)
YWdhms.__repr__ = lambda self: "{0}({1},{2},{3},{4},{5},{6})".format(self.__class__.__name__,
self.iso_year, self.iso_week, self.week_day, self.hour,
self.minute, self.second)
# Fix up GeoPoint
GeoPoint.__str__ = lambda self: "GeoPoint({0},{1},{2})".format(self.x, self.y, self.z)
GeoPoint_difference = lambda a, b: GeoPoint.difference(a, b)
GeoPoint_xy_distance = lambda a, b: GeoPoint.xy_distance(a, b)
# Fix up LandTypeFractions
LandTypeFractions.__str__ = lambda \
self: "LandTypeFractions(glacier={0},lake={1},reservoir={2},forest={3},unspecified={4})".format(self.glacier(),
self.lake(),
self.reservoir(),
self.forest(),
self.unspecified())
# Fix up GeoCellData
def StrGeoCellData(gcd):
return "GeoCellData(mid_point={0},catchment_id={1},area={2},ltf={3})".format(str(gcd.mid_point()),
gcd.catchment_id(), gcd.area(),
str(gcd.land_type_fractions_info()))
GeoCellData.__str__ = lambda self: StrGeoCellData(self)
# Fix up UtcPeriod
UtcPeriod.to_string = lambda self: str(self)
# Fix up TimeAxis
def ta_iter(x):
x.counter = 0
return x
def ta_next(ta):
if ta.counter >= len(ta):
del ta.counter
raise StopIteration
ta.counter += 1
return ta(ta.counter - 1)
TimeAxisFixedDeltaT.__str__ = lambda self: "TimeAxisFixedDeltaT({0},{1},{2})".format(Calendar().to_string(self.start), self.delta_t, self.n)
TimeAxisFixedDeltaT.__len__ = lambda self: self.size()
TimeAxisFixedDeltaT.__call__ = lambda self, i: self.period(i)
TimeAxisFixedDeltaT.__iter__ = lambda self: ta_iter(self)
TimeAxisFixedDeltaT.__next__ = lambda self: ta_next(self)
TimeAxisCalendarDeltaT.__str__ = lambda self: "TimeAxisCalendarDeltaT(Calendar('{3}'),{0},{1},{2})".format(Calendar().to_string(self.start), self.delta_t, self.n,self.calendar.tz_info.name())
TimeAxisCalendarDeltaT.__len__ = lambda self: self.size()
TimeAxisCalendarDeltaT.__call__ = lambda self, i: self.period(i)
TimeAxisCalendarDeltaT.__iter__ = lambda self: ta_iter(self)
TimeAxisCalendarDeltaT.__next__ = lambda self: ta_next(self)
TimeAxisByPoints.__str__ = lambda self: "TimeAxisByPoints(total_period={0}, n={1},points={2} )".format(str(self.total_period()),len(self),repr(TimeAxis(self).time_points))
TimeAxisByPoints.__len__ = lambda self: self.size()
TimeAxisByPoints.__call__ = lambda self, i: self.period(i)
TimeAxisByPoints.__iter__ = lambda self: ta_iter(self)
TimeAxisByPoints.__next__ = lambda self: ta_next(self)
def nice_ta_string(time_axis):
if time_axis.timeaxis_type == TimeAxisType.FIXED:
return '{0}'.format(str(time_axis.fixed_dt))
if time_axis.timeaxis_type == TimeAxisType.CALENDAR:
return '{0}'.format(str(time_axis.calendar_dt))
return '{0}'.format(str(time_axis.point_dt))
TimeAxis.__str__ = lambda self: nice_ta_string(self)
TimeAxis.__len__ = lambda self: self.size()
TimeAxis.__call__ = lambda self, i: self.period(i)
TimeAxis.__iter__ = lambda self: ta_iter(self)
TimeAxis.__next__ = lambda self: ta_next(self)
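# With the methods patched in above, every TimeAxis flavour can be consumed
# as a plain Python iterable (illustrative):
#   for period in time_axis:   # yields time_axis.period(i) for each step
#       ...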
TimeAxis.time_points = property( lambda self: time_axis_extract_time_points(self).to_numpy(),doc= \
"""
extract all time-points from a TimeAxis
like
[ time_axis.time(i) ].append(time_axis.total_period().end) if time_axis.size() else []
Parameters
----------
time_axis : TimeAxis
Returns
-------
time_points:numpy.array(dtype=np.int64)
[ time_axis.time(i) ].append(time_axis.total_period().end)
""")
# fix up property on timeseries
TimeSeries.time_axis = property(lambda self: self.get_time_axis(), doc="returns the time_axis of the timeseries")
TimeSeries.__len__ = lambda self: self.size()
TimeSeries.v = property(lambda self: self.values, doc="returns the point-values of timeseries, alias for .values")
TimeSeries.kling_gupta = lambda self, other_ts, s_r=1.0, s_a=1.0, s_b=1.0: kling_gupta(self, other_ts,
self.get_time_axis(), s_r, s_a,
s_b)
TimeSeries.kling_gupta.__doc__ = \
"""
computes the kling_gupta correlation using self as observation, and self.time_axis as
the comparison time-axis
Parameters
----------
other_ts : Timeseries
the predicted/calculated time-series to correlate
    s_r : float
        the Kling-Gupta scale r factor (weights the correlation part of the goal function)
    s_a : float
        the Kling-Gupta scale a factor (weights the relative-average part of the goal function)
    s_b : float
        the Kling-Gupta scale b factor (weights the relative-standard-deviation part of the goal function)
Return
------
KGEs : float
"""
TimeSeries.nash_sutcliffe = lambda self, other_ts: nash_sutcliffe(self, other_ts, self.get_time_axis())
TimeSeries.nash_sutcliffe.__doc__ = \
"""
    Computes the Nash-Sutcliffe model efficiency coefficient (n.s.)
for the two time-series over the specified time_axis
Ref: http://en.wikipedia.org/wiki/Nash%E2%80%93Sutcliffe_model_efficiency_coefficient
Parameters
----------
observed_ts : TimeSeries
the observed time-series
model_ts : TimeSeries
the time-series that is the model simulated / calculated ts
time_axis : TimeAxis
the time-axis that is used for the computation
Return
------
float: The n.s performance, that have a maximum at 1.0
"""
TsFixed.values = property(lambda self: self.v, doc="returns the point values, .v of the timeseries")
TsFixed.time_axis = property(lambda self: self.get_time_axis(), doc="returns the time_axis of the timeseries")
TsPoint.values = property(lambda self: self.v, doc="returns the point values, .v of the timeseries")
TsPoint.time_axis = property(lambda self: self.get_time_axis(), doc="returns the time_axis of the timeseries")
# some minor fixup to ease work with core-time-series vs TimeSeries
TsFixed.TimeSeries = property(lambda self: TimeSeries(self),doc="return a fully featured TimeSeries from the core TsFixed ")
TsFixed.nash_sutcliffe = lambda self, other_ts: nash_sutcliffe(self.TimeSeries, other_ts, TimeAxis(self.get_time_axis()))
TsFixed.kling_gupta = lambda self, other_ts, s_r=1.0, s_a=1.0, s_b=1.0: kling_gupta(self.TimeSeries, other_ts,
TimeAxis(self.get_time_axis()), s_r, s_a,
s_b)
TsPoint.TimeSeries = property(lambda self: TimeSeries(self.get_time_axis(),self.v,self.point_interpretation()),doc="return a fully featured TimeSeries from the core TsPoint")
TsPoint.nash_sutcliffe = lambda self, other_ts: nash_sutcliffe(self.TimeSeries, other_ts, TimeAxis(self.get_time_axis()))
TsPoint.kling_gupta = lambda self, other_ts, s_r=1.0, s_a=1.0, s_b=1.0: kling_gupta(self.TimeSeries, other_ts,
TimeAxis(self.get_time_axis()), s_r, s_a,
s_b)
# Fix up ARegionEnvironment
TemperatureSource.vector_t = TemperatureSourceVector
PrecipitationSource.vector_t = PrecipitationSourceVector
RadiationSource.vector_t = RadiationSourceVector
RelHumSource.vector_t = RelHumSourceVector
WindSpeedSource.vector_t = WindSpeedSourceVector
def np_array(dv:DoubleVector):
"""
convert flattened double-vector to numpy array
Parameters
----------
dv
Returns
-------
numpy array.
"""
f = dv.to_numpy()
n = int(sqrt(dv.size()))
m = f.reshape(n, n)
return m
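# e.g. a flattened DoubleVector holding [1., 2., 3., 4.] comes back as
# [[1., 2.], [3., 4.]]; the input is assumed to hold a square matrix row-major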
# fixup kalman state
KalmanState.x = property(lambda self: KalmanState.get_x(self).to_numpy(),
doc="represents the current bias estimate, kalman.state.x")
KalmanState.k = property(lambda self: KalmanState.get_k(self).to_numpy(),
doc="represents the current kalman gain factors, kalman.state.k")
KalmanState.P = property(lambda self: np_array(KalmanState.get_P(self)),
doc="returns numpy array of kalman.state.P, the nxn covariance matrix")
KalmanState.W = property(lambda self: np_array(KalmanState.get_W(self)),
doc="returns numpy array of kalman.state.W, the nxn noise matrix")
# fixup KalmanBiasPredictor
def KalmanBiasPredictor_update_with_forecast(bp, fc_set, obs, time_axis):
"""
Parameters
----------
bp
fc_set : TemperatureSourceVector or TsVector
obs : TimeSeries
time_axis : TimeAxis
Returns
-------
nothing
"""
if isinstance(fc_set, TemperatureSourceVector):
KalmanBiasPredictor.update_with_geo_forecast(bp, fc_set, obs, time_axis)
else:
KalmanBiasPredictor.update_with_forecast_vector(bp, fc_set, obs, time_axis)
def KalmanBiasPredictor_compute_running_bias(bp, fc_ts, obs_ts, time_axis):
"""
compute the running bias timeseries,
using one 'merged' - forecasts and one observation time - series.
Before each day - period, the bias - values are copied out to form
a continuous bias prediction time-series.
Parameters
----------
bias_predictor : KalmanBiasPredictor
The bias predictor object it self
forecast_ts : TimeSeries
a merged forecast ts
with period covering the observation_ts and time_axis supplied
    observation_ts : TimeSeries
the observation time-series
time_axis : TimeAxis
covering the period/timesteps to be updated
e.g. yesterday, 3h resolution steps, according to the points in the filter
Returns
-------
bias_ts : TimeSeries(time_axis,bias_vector,POINT_AVERAGE)
computed running bias-ts
"""
return KalmanBiasPredictor.compute_running_bias_ts(bp, fc_ts, obs_ts, time_axis)
KalmanBiasPredictor.update_with_forecast = KalmanBiasPredictor_update_with_forecast
KalmanBiasPredictor.compute_running_bias = KalmanBiasPredictor_compute_running_bias
def ts_vector_values_at_time(tsv:TsVector, t:int):
if not isinstance(tsv, TsVector):
if not isinstance(tsv, list):
raise RuntimeError('Supplied list of timeseries must be of type TsVector or list(TimeSeries)')
list_of_ts = tsv
tsv = TsVector()
for ts in list_of_ts:
tsv.append(ts)
return tsv.values_at(t).to_numpy()
ts_vector_values_at_time.__doc__ = TsVector.values_at.__doc__.replace('DoubleVector','ndarray').replace('TsVector','TsVector or list(TimeSeries)')
TsVector.values_at_time = ts_vector_values_at_time
TsVector.values_at_time.__doc__ = TsVector.values_at.__doc__.replace('DoubleVector','ndarray')
def geo_point_source_vector_values_at_time(gtsv:GeoPointSourceVector, t:int):
#if not isinstance(gtsv, GeoPointSourceVector):
# raise RuntimeError('Supplied list of timeseries must be of GeoPointSourceVector')
return compute_geo_ts_values_at_time(gtsv, t).to_numpy()
GeoPointSourceVector.values_at_time = geo_point_source_vector_values_at_time
GeoPointSourceVector.values_at_time.__doc__ = compute_geo_ts_values_at_time.__doc__.replace('DoubleVector','ndarray')
RadiationSourceVector.values_at_time = GeoPointSourceVector.values_at_time
PrecipitationSourceVector.values_at_time = GeoPointSourceVector.values_at_time
TemperatureSourceVector.values_at_time = GeoPointSourceVector.values_at_time
RelHumSourceVector.values_at_time = GeoPointSourceVector.values_at_time
WindSpeedSourceVector.values_at_time = GeoPointSourceVector.values_at_time
| felixmatt/shyft | shyft/api/__init__.py | Python | lgpl-3.0 | 15,456 |
# (C) British Crown Copyright 2015 - 2016, Met Office
#
# This file is part of iris-grib.
#
# iris-grib is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iris-grib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with iris-grib. If not, see <http://www.gnu.org/licenses/>.
"""
Unit tests for
:func:`iris_grib._load_convert.grid_definition_template_40`.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris_grib.tests first so that some things can be initialised
# before importing anything else.
import iris_grib.tests as tests
import numpy as np
import iris.coord_systems
import iris.coords
from iris_grib.tests.unit.load_convert import empty_metadata
from iris_grib._load_convert import grid_definition_template_40
MDI = 2 ** 32 - 1
class _Section(dict):
def get_computed_key(self, key):
return self.get(key)
class Test_regular(tests.IrisGribTest):
def section_3(self):
section = _Section({
'shapeOfTheEarth': 0,
'scaleFactorOfRadiusOfSphericalEarth': 0,
'scaledValueOfRadiusOfSphericalEarth': 6367470,
'scaleFactorOfEarthMajorAxis': 0,
'scaledValueOfEarthMajorAxis': MDI,
'scaleFactorOfEarthMinorAxis': 0,
'scaledValueOfEarthMinorAxis': MDI,
'iDirectionIncrement': 22500000,
'longitudeOfFirstGridPoint': 0,
'Ni': 16,
'scanningMode': 0b01000000,
'distinctLatitudes': np.array([-73.79921363, -52.81294319,
-31.70409175, -10.56988231,
10.56988231, 31.70409175,
52.81294319, 73.79921363]),
'numberOfOctectsForNumberOfPoints': 0,
'interpretationOfNumberOfPoints': 0,
})
return section
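    # note on scanningMode (GRIB2 flag table 3.4, stated for context): bit
    # value 0x40 set means rows scan south-to-north, 0x20 set means points
    # run consecutively along j (a transposed grid), and 0b00000000 scans
    # north-to-south, which the reverse-latitude test exercises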
def expected(self, y_dim, x_dim, y_neg=True):
# Prepare the expectation.
expected = empty_metadata()
cs = iris.coord_systems.GeogCS(6367470)
nx = 16
dx = 22.5
x_origin = 0
x = iris.coords.DimCoord(np.arange(nx) * dx + x_origin,
standard_name='longitude',
units='degrees_east',
coord_system=cs,
circular=True)
y_points = np.array([73.79921363, 52.81294319,
31.70409175, 10.56988231,
-10.56988231, -31.70409175,
-52.81294319, -73.79921363])
if not y_neg:
y_points = y_points[::-1]
y = iris.coords.DimCoord(y_points,
standard_name='latitude',
units='degrees_north',
coord_system=cs)
expected['dim_coords_and_dims'].append((y, y_dim))
expected['dim_coords_and_dims'].append((x, x_dim))
return expected
def test(self):
section = self.section_3()
metadata = empty_metadata()
grid_definition_template_40(section, metadata)
expected = self.expected(0, 1, y_neg=False)
self.assertEqual(metadata, expected)
def test_transposed(self):
section = self.section_3()
section['scanningMode'] = 0b01100000
metadata = empty_metadata()
grid_definition_template_40(section, metadata)
expected = self.expected(1, 0, y_neg=False)
self.assertEqual(metadata, expected)
def test_reverse_latitude(self):
section = self.section_3()
section['scanningMode'] = 0b00000000
metadata = empty_metadata()
grid_definition_template_40(section, metadata)
expected = self.expected(0, 1, y_neg=True)
self.assertEqual(metadata, expected)
class Test_reduced(tests.IrisGribTest):
def section_3(self):
section = _Section({
'shapeOfTheEarth': 0,
'scaleFactorOfRadiusOfSphericalEarth': 0,
'scaledValueOfRadiusOfSphericalEarth': 6367470,
'scaleFactorOfEarthMajorAxis': 0,
'scaledValueOfEarthMajorAxis': MDI,
'scaleFactorOfEarthMinorAxis': 0,
'scaledValueOfEarthMinorAxis': MDI,
'longitudes': np.array([0., 180.,
0., 120., 240.,
0., 120., 240.,
0., 180.]),
'latitudes': np.array([-59.44440829, -59.44440829,
-19.87571915, -19.87571915, -19.87571915,
19.87571915, 19.87571915, 19.87571915,
59.44440829, 59.44440829]),
'numberOfOctectsForNumberOfPoints': 1,
'interpretationOfNumberOfPoints': 1,
})
return section
def expected(self):
# Prepare the expectation.
expected = empty_metadata()
cs = iris.coord_systems.GeogCS(6367470)
x_points = np.array([0., 180.,
0., 120., 240.,
0., 120., 240.,
0., 180.])
y_points = np.array([-59.44440829, -59.44440829,
-19.87571915, -19.87571915, -19.87571915,
19.87571915, 19.87571915, 19.87571915,
59.44440829, 59.44440829])
x = iris.coords.AuxCoord(x_points,
standard_name='longitude',
units='degrees_east',
coord_system=cs)
y = iris.coords.AuxCoord(y_points,
standard_name='latitude',
units='degrees_north',
coord_system=cs)
expected['aux_coords_and_dims'].append((y, 0))
expected['aux_coords_and_dims'].append((x, 0))
return expected
def test(self):
section = self.section_3()
metadata = empty_metadata()
expected = self.expected()
grid_definition_template_40(section, metadata)
self.assertEqual(metadata, expected)
if __name__ == '__main__':
tests.main()
| pp-mo/iris-grib | iris_grib/tests/unit/load_convert/test_grid_definition_template_40.py | Python | lgpl-3.0 | 6,813 |
# This file is part of KTBS <http://liris.cnrs.fr/sbt-dev/ktbs>
# Copyright (C) 2011-2012 Pierre-Antoine Champin <pchampin@liris.cnrs.fr> /
# Universite de Lyon <http://www.universite-lyon.fr>
#
# KTBS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KTBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with KTBS. If not, see <http://www.gnu.org/licenses/>.
"""
This kTBS plugin allows to run the profiler on a per-request basis.
To enable the profiler, simply add 'profiler' as a URL parameter.
It will generate a temporary file named with the datetime and the URL,
which you can view with bin/view-profiler-dat, or a tool like
https://github.com/pwaller/pyprof2calltree.
Note that the profiler does not run exactly the same code path as normal:
it converts the iterable returned by the WSGI application to a list,
to ensure that all the code is actually run inside the profiler
(rather than deferred by a generator).
"""
from cProfile import runctx as run_in_profiler
from datetime import datetime
from os.path import join
from tempfile import gettempdir
from webob import Request
import logging
LOG = logging.getLogger(__name__)
from rdfrest.http_server import \
register_middleware, unregister_middleware, MyResponse, TOP
class ProfilerMiddleware(object):
DIRECTORY = gettempdir()
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
req = Request(environ)
params = environ['rdfrest.parameters']
do_profiling = params.pop('profiler', None)
if do_profiling is None:
resp = req.get_response(self.app)
return resp(environ, start_response)
else:
filename = '%s--%s--%s.profiler.dat' % (
datetime.now().strftime('%Y-%m-%d-%H:%M:%S'),
req.method,
req.url.replace('/', '_'),
)
my_globals = dict(globals())
run_in_profiler("""
global RET
resp = req.get_response(self.app)
RET = list(resp(environ, start_response))
""",
my_globals, locals(),
join(self.DIRECTORY, filename))
return my_globals['RET']
def start_plugin(config):
if config.has_section('profiler') and config.has_option('profiler', 'directory'):
ProfilerMiddleware.DIRECTORY = config.get('profiler', 'directory')
register_middleware(TOP, ProfilerMiddleware)
def stop_plugin():
unregister_middleware(ProfilerMiddleware)
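
# The generated .dat files are standard cProfile dumps, so besides
# bin/view-profiler-dat the stdlib pstats module can read them
# (illustrative, with a hypothetical filename):
#   import pstats
#   pstats.Stats('/tmp/2015-01-01-12:00:00--GET--url.profiler.dat') \
#         .sort_stats('cumulative').print_stats(20)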
| ktbs/ktbs | lib/ktbs/plugins/profiler.py | Python | lgpl-3.0 | 2,998 |
#!../venv/bin/python
import sys
print (sys.argv[1:])
| thinkl33t/mqtt2telegram | scripts/test.py | Python | lgpl-3.0 | 54 |
"""
test package copying
"""
import shutil
import time
import os.path
import os
from rez.system import system
from rez.build_process_ import create_build_process
from rez.build_system import create_build_system
from rez.resolved_context import ResolvedContext
from rez.packages_ import get_latest_package
from rez.package_copy import copy_package
from rez.vendor.version.version import VersionRange
from rez.tests.util import TestBase, TempdirMixin
class TestCopyPackage(TestBase, TempdirMixin):
@classmethod
def setUpClass(cls):
TempdirMixin.setUpClass()
path = os.path.dirname(__file__)
packages_path = os.path.join(path, "data", "builds", "packages")
cls.src_root = os.path.join(cls.root, "src", "packages")
cls.install_root = os.path.join(cls.root, "packages")
shutil.copytree(packages_path, cls.src_root)
# repo we will copy packages into
cls.dest_install_root = os.path.join(cls.root, "dest_packages")
# include modules
pypath = os.path.join(path, "data", "python", "late_bind")
cls.settings = dict(
packages_path=[cls.install_root],
package_filter=None,
package_definition_python_path=pypath,
resolve_caching=False,
warn_untimestamped=False,
warn_old_commands=False,
implicit_packages=[])
@classmethod
def tearDownClass(cls):
TempdirMixin.tearDownClass()
def setup_once(self):
# build packages used by this test
self._build_package("build_util", "1")
self._build_package("floob")
self._build_package("foo", "1.0.0")
self._build_package("foo", "1.1.0")
self._build_package("bah", "2.1")
@classmethod
def _create_builder(cls, working_dir):
buildsys = create_build_system(working_dir)
return create_build_process(process_type="local",
working_dir=working_dir,
build_system=buildsys)
@classmethod
def _build_package(cls, name, version=None):
# create the builder
working_dir = os.path.join(cls.src_root, name)
if version:
working_dir = os.path.join(working_dir, version)
builder = cls._create_builder(working_dir)
builder.build(install_path=cls.install_root, install=True, clean=True)
def _reset_dest_repository(self):
system.clear_caches()
if os.path.exists(self.dest_install_root):
shutil.rmtree(self.dest_install_root)
os.makedirs(self.dest_install_root)
def _get_src_pkg(self, name, version):
return get_latest_package(
name,
range_=VersionRange("==" + version),
paths=[self.install_root],
error=True
)
def _get_dest_pkg(self, name, version):
return get_latest_package(
name,
range_=VersionRange("==" + version),
paths=[self.dest_install_root],
error=True
)
def _assert_copied(self, result, copied, skipped):
self.assertEqual(len(result["copied"]), copied)
self.assertEqual(len(result["skipped"]), skipped)
def test_1(self):
"""Simple package copy, no variants, no overwrite."""
self._reset_dest_repository()
# make a copy of a package
src_pkg = self._get_src_pkg("floob", "1.2.0")
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root
)
self._assert_copied(result, 1, 0)
# check the copied package exists and matches
dest_pkg = self._get_dest_pkg("floob", "1.2.0")
result_variant = result["copied"][0][1]
dest_variant = dest_pkg.iter_variants().next()
self.assertEqual(dest_variant.handle, result_variant.handle)
pyfile = os.path.join(dest_pkg.base, "python")
ctime = os.stat(pyfile).st_ctime
# copy again but with overwrite=False; should do nothing
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root
)
self._assert_copied(result, 0, 1)
# check that package payload wasn't overwritten
self.assertEqual(os.stat(pyfile).st_ctime, ctime)
def test_2(self):
"""Package copy, no variants, overwrite."""
self._reset_dest_repository()
# make a copy of a package
src_pkg = self._get_src_pkg("floob", "1.2.0")
copy_package(
package=src_pkg,
dest_repository=self.dest_install_root
)
dest_pkg = self._get_dest_pkg("floob", "1.2.0")
pyfile = os.path.join(dest_pkg.base, "python")
ctime = os.stat(pyfile).st_ctime
# overwrite same package copy
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
overwrite=True
)
self._assert_copied(result, 1, 0)
# check that package payload was overwritten
self.assertNotEqual(os.stat(pyfile).st_ctime, ctime)
def test_3(self):
"""Package copy, variants, overwrite and non-overwrite."""
self._reset_dest_repository()
# make a copy of a varianted package
src_pkg = self._get_src_pkg("bah", "2.1")
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root
)
self._assert_copied(result, 2, 0) # 2 variants
# check the copied variants exist and match
dest_pkg = self._get_dest_pkg("bah", "2.1")
ctimes = []
for index in (0, 1):
result_variant = result["copied"][index][1]
dest_variant = dest_pkg.get_variant(index)
self.assertEqual(dest_variant.handle, result_variant.handle)
pyfile = os.path.join(dest_variant.root, "python")
ctime = os.stat(pyfile).st_ctime
ctimes.append(ctime)
# copy variant with no overwrite, should do nothing
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
variants=[1]
)
self._assert_copied(result, 0, 1)
# copy variant with overwrite
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
variants=[1],
overwrite=True
)
self._assert_copied(result, 1, 0)
# check copied variant is the one we expect
dest_pkg = self._get_dest_pkg("bah", "2.1")
result_variant = result["copied"][0][1]
dest_variant = dest_pkg.get_variant(1)
self.assertEqual(dest_variant.handle, result_variant.handle)
# check copied variant payload was overwritten
pyfile = os.path.join(dest_variant.root, "python")
self.assertNotEqual(os.stat(pyfile).st_ctime, ctimes[1])
# check non-copied variant payload was not written
skipped_variant = dest_pkg.get_variant(0)
pyfile = os.path.join(skipped_variant.root, "python")
self.assertEqual(os.stat(pyfile).st_ctime, ctimes[0])
def test_4(self):
"""Package copy with rename, reversion."""
self._reset_dest_repository()
# copy a package to a different name and version
src_pkg = self._get_src_pkg("floob", "1.2.0")
result = copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
dest_name="flaab",
dest_version="5.4.1"
)
self._assert_copied(result, 1, 0)
# check copied variant is the one we expect
dest_pkg = self._get_dest_pkg("flaab", "5.4.1")
result_variant = result["copied"][0][1]
dest_variant = dest_pkg.iter_variants().next()
self.assertEqual(dest_variant.handle, result_variant.handle)
def test_5(self):
"""Package copy with standard, new timestamp."""
self._reset_dest_repository()
# wait 1 second to guarantee newer timestamp in copied pkg
time.sleep(1)
# copy package and overwrite timestamp
src_pkg = self._get_src_pkg("floob", "1.2.0")
copy_package(
package=src_pkg,
dest_repository=self.dest_install_root
)
# check copied variant contains expected timestamp
dest_pkg = self._get_dest_pkg("floob", "1.2.0")
self.assertTrue(dest_pkg.timestamp > src_pkg.timestamp)
def test_6(self):
"""Package copy with keep_timestamp."""
self._reset_dest_repository()
# wait 1 second to ensure we don't just accidentally get same timestamp
time.sleep(1)
# copy package and overwrite timestamp
src_pkg = self._get_src_pkg("floob", "1.2.0")
copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
keep_timestamp=True
)
# check copied variant contains expected timestamp
dest_pkg = self._get_dest_pkg("floob", "1.2.0")
self.assertEqual(dest_pkg.timestamp, src_pkg.timestamp)
def test_7(self):
"""Package copy with overrides."""
self._reset_dest_repository()
overrides = {
"timestamp": 10000,
"description": "this is a copy",
"some_extra_key": True
}
# copy package and overwrite timestamp
src_pkg = self._get_src_pkg("floob", "1.2.0")
copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
overrides=overrides
)
# check copied variant contains expected timestamp
dest_pkg = self._get_dest_pkg("floob", "1.2.0")
for k, v in overrides.iteritems():
self.assertEqual(getattr(dest_pkg, k), v)
def test_8(self):
"""Ensure that include modules are copied."""
self._reset_dest_repository()
src_pkg = self._get_src_pkg("foo", "1.1.0")
copy_package(
package=src_pkg,
dest_repository=self.dest_install_root,
)
dest_pkg = self._get_dest_pkg("foo", "1.1.0")
dest_variant = dest_pkg.iter_variants().next()
# do a resolve
ctxt = ResolvedContext(
["foo==1.1.0"],
package_paths=[self.dest_install_root, self.install_root]
)
resolved_variant = ctxt.get_resolved_package("foo")
self.assertEqual(dest_variant.handle, resolved_variant.handle)
# this can only match if the include module was copied with the package
environ = ctxt.get_environ(parent_environ={})
self.assertEqual(environ.get("EEK"), "2")
| cwmartin/rez | src/rez/tests/test_copy_package.py | Python | lgpl-3.0 | 10,779 |
# TODO: provide a transition checker that prevents feedback loops and
# inconsistent state in the user db; that way the user can eliminate the
# store step on the receive side.
from charm.core.engine.util import *
from charm.toolbox.enum import Enum
from math import log, ceil
debug = False
# standardize responses between client and server
# code = Enum('Success', 'Fail', 'Repeat', 'StartSubprotocol', 'EndSubprotocol')
class Protocol:
def __init__(self, error_states, max_size=2048): # any init information?
global error
self.p_ID = 0
self.p_ctr = 0
error = error_states
# dictionary of party types (each type gets an identifier)
self.partyTypes = {}
self.party = {}
self._serialize = False
self.db = {} # initialize the database
self.max_size = max_size
self.prefix_size = ceil(log(max_size, 256))
def setup(self, *args):
# handles the hookup between parties involved
Error = True
for arg in args:
if isinstance(arg, dict):
print("Setup of: ", arg['name'])
if not self.addInstance(arg): Error = False
else:
print(type(arg))
return Error
def addInstance(self, obj):
p_ctr = self.p_ctr
for i in self.partyTypes.keys():
if i == obj['type']: # we find the party type
self.party[p_ctr] = {}
self.party[p_ctr]['name'], self.party[p_ctr]['socket'] = obj['name'], obj['socket']
self.party[p_ctr]['type'], self.party[p_ctr]['states'] = obj['type'], self.partyTypes[i]['states']
self.party[p_ctr]['init'] = self.partyTypes[i]['init']
self.p_ctr += 1
print("Adding party instance w/ id: ", p_ctr)
return True
return None
def addPartyType(self, type, state_map, trans_map, init_state=False):
ExistingTypeFound = False
# see if type already exists. break and return if so
for i in self.partyTypes.keys():
if self.partyTypes[i]['type'] == type:
ExistingTypeFound = True
break
# means we are adding a new type
if not ExistingTypeFound:
p_ID = self.p_ID
party = {'type':type, 'id':p_ID }
if(isinstance(state_map, dict)):
party['states'] = state_map # function pointers for state functions...
if(isinstance(trans_map, dict)):
party['transitions'] = trans_map
party['init'] = init_state # which state initializes the protocol
            self.partyTypes[type] = party # register the new party type
self.p_ID += 1
return True
return False
#
# def addValidTransitions(self, trans_map):
# if isinstance(trans_map, dict):
# self.trans_map = trans_map
def listStates(self, partyID):
# check if a member parameter is defined
if partyID < self.p_ctr:
return self.party[partyID]['states']
return None
def listParties(self):
return list(self.party.keys())
def listParyTypes(self):
return list(self.partyTypes.keys())
def getInitState(self, _type):
for i in self.listParties():
if self.party[i]['type'] == _type:
self._socket = self.party[i]['socket']
if self.party[i]['init']:
# set current trans starting point
self.cur_state = 1
return (True, self.listStates(i)[1])
else:
self.cur_state = 2
return (False, self.listStates(i)[2])
print("Returning junk!")
return (False, None)
def setState(self, state_num):
# find the corresponding call back based on current party id
self.nextCall = None
if state_num == None: return None
nextPossibleState = self._cur_trans.get(self.cur_state)
        if type(nextPossibleState) == list and state_num not in nextPossibleState:
            print("Invalid State Transition! Error!")
            print("\tCurrent state: ", self.cur_state)
            print("\tNext state: ", state_num)
            print("Allowed states: ", nextPossibleState)
            # do not make the transition
            return None
        elif type(nextPossibleState) != list and nextPossibleState != state_num:
            print("Invalid State Transition! Error!")
            print("\tCurrent state: ", self.cur_state)
            print("\tNext state not allowed: ", state_num)
            # do not make the transition
            return None
for i in self.listParties():
states = self.listStates(i)
if states.get(state_num) != None:
self.nextCall = states.get(state_num)
# preparing for state transition here.
self.cur_state = state_num
break
return None
def send_msg(self, object):
# use socket to send message (check if serializaton is required)
if self._socket != None:
if self._serialize:
result = self._user_serialize(object)
else:
result = self.serialize(object)
#print("DEBUG: send_msg : result =>", result)
if len(result) > self.max_size:
print("Message too long! max_size="+str(self.max_size))
return None
result = len(result).to_bytes(length=self.prefix_size, byteorder='big') + result
self._socket.send(result)
return None
# receives exactly n bytes
def recv_all(self, n):
recvd = 0
res = b''
while recvd < n:
res = res + self._socket.recv(n-recvd)
recvd = len(res)
return res
def recv_msg(self):
# read the socket and return the received message (check if deserialization)
# is necessary
if self._socket != None:
# block until data is available or remote host closes connection
msglen = int.from_bytes(self.recv_all(self.prefix_size), byteorder='big')
result = self.recv_all(msglen)
            if result == b'': return None
else:
if self._serialize:
return self._user_deserialize(result)
else: # default serialize call
return self.deserialize(result)
return None
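    # framing sketch: with max_size=2048, prefix_size = ceil(log(2048, 256)) = 2,
    # so a 300-byte pickled payload travels as b'\x01\x2c' + payload, and the
    # receiver reads exactly those 2 prefix bytes first to learn the length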
# # serialize an object
# def serialize(self, object):
# if type(object) == str:
# return bytes(object, 'utf8')
# return object
#
# def deserialize(self, object):
# if type(object) == bytes:
# return object.decode('utf8')
# return object
def setSubclassVars(self, group, state=None):
if hasattr(group, 'serialize') and hasattr(group, 'deserialize'):
self.group = group
if state != None:
if type(state) == dict:
self.db = state
def get(self, keys, _type=tuple):
if not type(keys) == list: return
if _type == tuple:
ret = []
else: ret = {}
# get the data
for i in keys:
if _type == tuple:
ret.append(self.db[i])
else: # dict
ret[ i ] = self.db[i]
# return data
if _type == tuple:
return tuple(ret)
return ret
def store(self, *args):
for i in args:
if isinstance(i, tuple):
self.db[ i[0] ] = i[1]
return None
def serialize(self, object):
# print("input object... => ", object)
if type(object) == dict:
bytes_object = serializeDict(object, self.group)
return pickleObject(bytes_object)
elif type(object) == str:
return pickleObject(object)
else:
# print("serialize: just =>", object)
return object
def deserialize(self, bytes_object):
# print("deserialize input =>", bytes_object)
if type(bytes_object) == bytes:
object = unpickleObject(bytes_object)
if isinstance(object, dict):
return deserializeDict(object, self.group)
return object
# OPTIONAL
# derived class must call this function in order to
def setSerializers(self, serial, deserial):
self._serialize = True
self._user_serialize = serial
self._user_deserialize = deserial
return None
# records the final state of a protocol execution
def setErrorCode(self, value):
self.result = value
# executes state machine from the 'party_type' perspective
def execute(self, party_type, close_sock=True):
print("Party Descriptions:")
print(self.listParyTypes(), "\n")
# print("Executing protocol engine...")
# assume there are two parties: support more in the future.
# if len(self.listParties()) == 2:
# p1, p2 = self.listParties()
# print(self.listParties())
# main loop
# Timeout = False
(start, func) = self.getInitState(party_type)
self._cur_trans = self.partyTypes[party_type]['transitions']
#print("Possible transitions: ", self._cur_trans)
print("Starting Point => ", func.__name__)
if start == True:
# call the first state for party1, then send msg
output = func.__call__()
if type(output) == dict: self.db.update(output)
self.send_msg(output)
else:
# first receive message, call state function
# then send call response
input = self.recv_msg()
if type(input) == dict:
# print("input db :=>", input)
self.db.update(input)
output = func.__call__(input)
if isinstance(output, dict):
# print("output db :=>", output)
self.db.update(output)
self.send_msg(output)
# take output and send back to other party via socket
while self.nextCall != None:
input = self.recv_msg()
if isinstance(input, dict): self.db.update(input)
output = self.nextCall.__call__(input)
if output != None:
if isinstance(output, dict): self.db.update(output)
self.send_msg(output)
if close_sock:
self.clean()
return output
def check(self):
        # cycle through parties, make sure they are differently typed?
# p_ID must be at least 2
# ...
pass
def clean(self):
if debug: print("Cleaning database...")
self._socket.close()
self.db.clear()
print("PROTOCOL COMPLETE!")
return None
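
# Illustrative wiring (callback names and socket are hypothetical): a
# two-party exchange where the client initiates from state 1 and the
# server answers from state 2.
#
#   p = Protocol(None)
#   p.addPartyType('client', {1: say_hello, 3: finish}, {1: 3, 3: None}, True)
#   p.addPartyType('server', {2: reply}, {2: 2})
#   p.setup({'type': 'client', 'name': 'c0', 'socket': sock})
#   p.execute('client')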
| JHUISI/charm | charm/core/engine/protocol.py | Python | lgpl-3.0 | 10,994 |
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
from lazyflow.utility.pathHelpers import compressPathForDisplay, getPathVariants, PathComponents
SIMULATE_WINDOWS = False
class TestPathHelpers(object):
@classmethod
def setupClass(cls):
if SIMULATE_WINDOWS:
import ntpath
os.sep = ntpath.sep
os.path = ntpath
def testPathComponents(self):
components = PathComponents('/some/external/path/to/file.h5/with/internal/path/to/data')
assert components.externalPath == '/some/external/path/to/file.h5'
assert components.extension == '.h5'
assert components.internalPath == '/with/internal/path/to/data'
components = PathComponents('/some/external/path/to/file.h5_crazy_ext.h5/with/internal/path/to/data')
assert components.externalPath == '/some/external/path/to/file.h5_crazy_ext.h5'
assert components.extension == '.h5'
assert components.internalPath == '/with/internal/path/to/data'
# Everything should work for URLs, too.
components = PathComponents('http://somehost:8000/path/to/data/with.ext')
assert components.externalPath == 'http://somehost:8000/path/to/data/with.ext'
assert components.extension == '.ext'
assert components.internalPath is None
assert components.externalDirectory == 'http://somehost:8000/path/to/data'
assert components.filenameBase == 'with'
# Asterisk should be treated like an ordinary character for component purposes.
assert PathComponents('/tmp/hello*.png').totalPath() == '/tmp/hello*.png'
# Try modifying the properties and verify that the total path is updated.
components = PathComponents('/some/external/path/to/file.h5/with/internal/path/to/data')
components.extension = '.hdf5'
assert components.externalPath == '/some/external/path/to/file.hdf5'
assert components.totalPath() == '/some/external/path/to/file.hdf5/with/internal/path/to/data'
components.filenameBase = 'newbase'
assert components.totalPath() == '/some/external/path/to/newbase.hdf5/with/internal/path/to/data'
components.internalDirectory = 'new/internal/dir'
assert components.totalPath() == '/some/external/path/to/newbase.hdf5/new/internal/dir/data'
components.internalDatasetName = 'newdata'
assert components.totalPath() == '/some/external/path/to/newbase.hdf5/new/internal/dir/newdata'
components.externalDirectory = '/new/extern/dir/'
assert components.totalPath() == '/new/extern/dir/newbase.hdf5/new/internal/dir/newdata'
components.externalDirectory = '/new/extern/dir'
assert components.totalPath() == '/new/extern/dir/newbase.hdf5/new/internal/dir/newdata'
components.externalPath = '/new/externalpath/somefile.h5'
assert components.totalPath() == '/new/externalpath/somefile.h5/new/internal/dir/newdata'
components.filename = 'newfilename.h5'
assert components.totalPath() == '/new/externalpath/newfilename.h5/new/internal/dir/newdata'
components.internalPath = '/new/internal/path/dataset'
assert components.totalPath() == '/new/externalpath/newfilename.h5/new/internal/path/dataset'
def testCompressPathForDisplay(self):
assert compressPathForDisplay("/a/b.txt", 30) == "/a/b.txt"
path = "/test/bla/bla/this_is_a_very_long_filename_bla_bla.txt"
for l in [5,10,15,20,30]:
assert len(compressPathForDisplay(path, l)) == l
def test_getPathVariants(self):
abs, rel = getPathVariants('/aaa/bbb/ccc/ddd.txt', '/aaa/bbb/ccc/eee')
#assert abs == '/aaa/bbb/ccc/ddd.txt'
# Use normpath to make sure this test works on windows...
expected = os.path.normpath(os.path.join('/aaa/bbb/ccc/eee', '/aaa/bbb/ccc/ddd.txt')).replace('\\', '/')
assert abs == expected, "{} != {}".format( abs, expected )
assert rel == '../ddd.txt'
abs, rel = getPathVariants('../ddd.txt', '/aaa/bbb/ccc/eee')
#assert abs == '/aaa/bbb/ccc/ddd.txt'
# Use normpath to make sure this test works on windows...
assert abs == os.path.normpath(os.path.join('/aaa/bbb/ccc/eee', '../ddd.txt')).replace('\\', '/')
assert rel == '../ddd.txt'
abs, rel = getPathVariants('ddd.txt', '/aaa/bbb/ccc')
#assert abs == '/aaa/bbb/ccc/ddd.txt'
# Use normpath to make sure this test works on windows...
assert abs == os.path.normpath(os.path.join('/aaa/bbb/ccc', 'ddd.txt')).replace('\\', '/')
assert rel == 'ddd.txt'
assert getPathVariants('', '/abc') == ('/abc', ''), \
"{} != {}".format( getPathVariants('', '/abc'), ('/abc', '') )
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
nose.run(defaultTest=__file__)
| stuarteberg/lazyflow | tests/testPathHelpers.py | Python | lgpl-3.0 | 6,093 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
for all "spoke" CASUs, they emit when reading one specific directional
IR sensor.
set the direction that is sensed by the -d flag, from NESW
this is borrowed from `examples/targeted_messaging`, with server setup
that happens behind the scenes (i.e. from RTC files!)
'''
from assisipy import casu
import argparse, os
import time
class CasuController:
def __init__(self, rtc_file, direction, verb=False):
self.__casu = casu.Casu(rtc_file)
self._ctr_dir = direction
self.old_state = 'Off'
self.state = 'Off'
self.verb = verb
def stop(self):
self.__casu.stop()
def send_msg(self):
while True:
# North side => yellow
if self._ctr_dir == 'F' and self.__casu.get_range(casu.IR_F) < 2:
self.__casu.set_diagnostic_led_rgb(1, 1, 0, casu.DLED_TOP)
self.old_state = self.state
self.state = 'Yellow On'
# East => red
elif self._ctr_dir == 'R' and ((self.__casu.get_range(casu.IR_FR) <
2 or self.__casu.get_range(casu.IR_BR) < 2)):
self.__casu.set_diagnostic_led_rgb(1, 0, 0, casu.DLED_TOP)
self.old_state = self.state
self.state = 'Red On'
# South => blue
elif self._ctr_dir == 'S' and self.__casu.get_range(casu.IR_B) < 2:
self.__casu.set_diagnostic_led_rgb(0, 0, 1, casu.DLED_TOP)
self.old_state = self.state
self.state = 'Blue On'
            # West => green (left-side sensors)
            elif self._ctr_dir == 'W' and ((self.__casu.get_range(casu.IR_BL) <
                2 or self.__casu.get_range(casu.IR_FL) < 2)):
self.__casu.set_diagnostic_led_rgb(0, 1, 0, casu.DLED_TOP)
self.old_state = self.state
self.state = 'Green On'
else:
self.__casu.diagnostic_led_standby(casu.DLED_TOP)
self.old_state = self.state
self.state = 'Off'
if self.old_state != self.state:
if self.old_state in ['Red On', 'Green On', 'Blue On',
'Yellow On']:
self.__casu.send_message('collector', 'Off')
if self.state in ['Red On', 'Green On', 'Blue On',
'Yellow On']:
self.__casu.send_message('collector', 'On')
def loop(self):
"""
Do some smart control stuff...
"""
while True:
self.send_msg()
time.sleep(0.5)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--rtc-path', type=str, default='',
help="location of RTC files to configure CASUs",)
parser.add_argument('-n', '--name', type=str, default='casu-ctr',
help="location of RTC files to configure CASUs",)
parser.add_argument('-v', '--verb', type=int, default=1,
help="verbosity level")
parser.add_argument('-d', '--dir', type=str, default='F',
help="direction of centre")
args = parser.parse_args()
fname = "{}.rtc".format(args.name)
rtc = os.path.join(args.rtc_path, fname)
print "connecting to casu {} ('{}')".format(args.name, rtc)
ctrl = CasuController(rtc_file=rtc, direction=args.dir, verb=args.verb)
try:
while True:
ctrl.loop()
except KeyboardInterrupt:
# cleanup
ctrl.stop()
| assisi/assisipy-examples | remote_sensors/spoke.py | Python | lgpl-3.0 | 3,590 |
# -*- coding: utf8 -*-
import math
from pycraft.common.util import ndarray, product
class GaussianKernel:
__slots__ = ['_size', '_kernel']
def __init__(self, size):
self._size = size
self._kernel = ndarray(2*size + 1, 2*size + 1)
bell_size = 1.0 / size
bell_height = 2 * size
for dx, dz in product(range(-size, size+1), range(-size, size+1)):
bx = bell_size * dx
bz = bell_size * dz
self._kernel[dx+size][dz+size] = \
bell_height * math.exp(-(bx**2 + bz**2) / 2)
def get(self, dx, dz):
return self._kernel[dx+self._size][dz+self._size]
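
# Illustrative usage (not part of the original module): the centre weight is
# bell_height = 2 * size, and weights fall off with squared distance.
if __name__ == '__main__':
    k = GaussianKernel(2)
    assert abs(k.get(0, 0) - 4.0) < 1e-9            # exp(0) * 2*size
    assert k.get(2, 2) < k.get(1, 1) < k.get(0, 0)  # monotone decay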
| nosix/PyCraft | src/pycraft/service/primitive/fuzzy/gaussian.py | Python | lgpl-3.0 | 658 |
"""
Copyright (C) 2008-2013 Tomasz Bursztyka
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
""" ProgramHeader and Program classes """
from elf.core.property import VALUE_FIXED, VALUE_BITWISE
from elf.core.header import Header
from elf.core.page import Page
from elf.utils import mirrorDict
phdr_type = {
'PT_NULL' : 0,
'PT_LOAD' : 1,
'PT_DYNAMIC' : 2,
'PT_INTERP' : 3,
'PT_NOTE' : 4,
'PT_SHLIB' : 5,
'PT_PHDR' : 6,
'PT_TLS' : 7,
'PT_NUM' : 8,
'PT_LOOS' : 0x60000000,
'PT_GNU_EH_FRAME' : 0x6474e550,
'PT_GNU_STACK' : 0x6474e551,
'PT_GNU_RELRO' : 0x6474e552,
'PT_PAX_FLAGS' : 0x65041580,
'PT_LOSUNW' : 0x6ffffffa,
'PT_SUNWBSS' : 0x6ffffffa,
'PT_SUNWSTACK' : 0x6ffffffb,
'PT_HISUNW' : 0x6fffffff,
'PT_HIOS' : 0x6fffffff,
'PT_LOPROC' : 0x70000000,
'PT_HIPROC' : 0x7fffffff,
'PT_MIPS_REGINFO' : 0x70000000,
'PT_MIPS_RTPROC' : 0x70000001,
'PT_MIPS_OPTIONS' : 0x70000002,
'PT_HP_TLS' : (0x60000000 + 0x0),
'PT_HP_CORE_NONE' : (0x60000000 + 0x1),
'PT_HP_CORE_VERSION' : (0x60000000 + 0x2),
'PT_HP_CORE_KERNEL' : (0x60000000 + 0x3),
'PT_HP_CORE_COMM' : (0x60000000 + 0x4),
'PT_HP_CORE_PROC' : (0x60000000 + 0x5),
'PT_HP_CORE_LOADABLE' : (0x60000000 + 0x6),
'PT_HP_CORE_STACK' : (0x60000000 + 0x7),
'PT_HP_CORE_SHM' : (0x60000000 + 0x8),
'PT_HP_CORE_MMF' : (0x60000000 + 0x9),
'PT_HP_PARALLEL' : (0x60000000 + 0x10),
'PT_HP_FASTBIND' : (0x60000000 + 0x11),
'PT_HP_OPT_ANNOT' : (0x60000000 + 0x12),
'PT_HP_HSL_ANNOT' : (0x60000000 + 0x13),
'PT_HP_STACK' : (0x60000000 + 0x14),
'PT_PARISC_ARCHEXT' : 0x70000000,
'PT_PARISC_UNWIND' : 0x70000001,
'PT_ARM_EXIDX' : 0x70000001,
'PT_IA_64_ARCHEXT' : (0x70000000 + 0),
'PT_IA_64_UNWIND' : (0x70000000 + 1),
'PT_IA_64_HP_OPT_ANOT' : (0x60000000 + 0x12),
'PT_IA_64_HP_HSL_ANOT' : (0x60000000 + 0x13),
'PT_IA_64_HP_STACK' : (0x60000000 + 0x14),
}
phdr_type = mirrorDict(phdr_type)
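# mirrorDict (behaviour assumed from its use here) adds reverse mappings, so
# both phdr_type['PT_LOAD'] == 1 and phdr_type[1] == 'PT_LOAD' hold afterwards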
phdr_flags = {
'PF_X' : (1 << 0),
'PF_W' : (1 << 1),
'PF_R' : (1 << 2),
'PF_PAGEEXEC' : (1 << 4),
'PF_NOPAGEEXEC' : (1 << 5),
'PF_SEGMEXEC' : (1 << 6),
'PF_NOSEGMEXEC' : (1 << 7),
'PF_MPROTECT' : (1 << 8),
'PF_NOMPROTECT' : (1 << 9),
'PF_RANDEXEC' : (1 << 10),
'PF_NORANDEXEC' : (1 << 11),
'PF_EMUTRAMP' : (1 << 12),
'PF_NOEMUTRAMP' : (1 << 13),
'PF_RANDMMAP' : (1 << 14),
'PF_NORANDMMAP' : (1 << 15),
'PF_MASKOS' : 0x0ff00000,
'PF_MASKPROC' : 0xf0000000,
'PF_MIPS_LOCAL' : 0x10000000,
'PF_PARISC_SBP' : 0x08000000,
'PF_HP_PAGE_SIZE' : 0x00100000,
'PF_HP_FAR_SHARED' : 0x00200000,
'PF_HP_NEAR_SHARED' : 0x00400000,
'PF_HP_CODE' : 0x01000000,
'PF_HP_MODIFY' : 0x02000000,
'PF_HP_LAZYSWAP' : 0x04000000,
'PF_HP_SBP' : 0x08000000,
'PF_ARM_SB' : 0x10000000,
'PF_IA_64_NORECOV' : 0x80000000,
}
phdr_flags = mirrorDict(phdr_flags)
class ProgramHeader( Header ):
descriptions_32 = [ 'p_type', 'p_offset', 'p_vaddr', 'p_paddr',
'p_filesz', 'p_memsz', 'p_flags', 'p_align' ]
descriptions_64 = [ 'p_type', 'p_flags', 'p_offset', 'p_vaddr',
'p_paddr', 'p_filesz', 'p_memsz', 'p_align' ]
hr_values = {
'p_type' : [ VALUE_FIXED, phdr_type ],
'p_flags' : [ VALUE_BITWISE, phdr_flags ],
}
format_32 = [ 'i', 'I', 'I', 'I', 'i', 'i', 'i', 'I' ]
format_64 = [ 'i', 'i', 'Q', 'Q', 'Q', 'q', 'q', 'Q' ]
def affect(self, program):
try:
self.p_vaddr -= self.p_offset - program.offset_start
self.p_paddr = self.p_vaddr
self.p_offset = program.offset_start
self.p_filesz = program.size
except Exception:
pass
class Program( Page ):
def __init__(self, phdr):
Page.__init__(self, phdr, phdr.p_offset, phdr.p_filesz)
self.protected = True
#######
# EOF #
#######
| tbursztyka/python-elf | elf/program.py | Python | lgpl-3.0 | 5,074 |
# Copyright (C) 2009 Gaetan Guidet
#
# This file is part of pygl.
#
# luagl is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or (at
# your option) any later version.
#
# luagl is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
import sys
import pygl
from pygl import gl
from pygl import glut
prog = None
mesh = None
mesh_idx = None
use_arrays = True
elapsed_time = 0 # in milliseconds
step = 1
time_param = 1
def checkVertex(element):
if element[0] != element.position.x:
raise Exception("position.x mismatch")
if element[1] != element.position.y:
raise Exception("position.y mismatch")
if element[2] != element.position.z:
raise Exception("position.z mismatch")
if element[3] != element.texcoord.s:
raise Exception("texcoord.s mismatch")
if element[4] != element.texcoord.t:
raise Exception("texcoord.t mismatch")
def initMesh():
global mesh, mesh_idx
mesh_fmt = pygl.buffer.MakeStructure(["position", ["x", "y", "z"]], ["texcoord", ["s", "t"]])
mesh = pygl.buffer.New(pygl.buffer.Float, mesh_fmt, 4)
# StructuredBuffer sucks ...
mesh_idx = pygl.buffer.New(pygl.buffer.Ushort, 4)
mesh_idx[0] = 0
mesh_idx[1] = 1
mesh_idx[2] = 2
mesh_idx[3] = 3
v = mesh.element(0)
v[0] = 0
v[1] = 0
v[2] = 0
v[3] = 0
v[4] = 0
print("check v0")
checkVertex(v)
v = mesh.element(1)
v.position.x = 1
v.position.y = 0
v.position.z = 0
v.texcoord.s = 1
v.texcoord.t = 0
print("check v1")
checkVertex(v)
v = mesh.element(2)
v[0] = 1
v[1] = 1
v[2] = 0
v[3] = 1
v[4] = 1
print("check v2")
checkVertex(v)
v = mesh.element(3)
v[0] = 0
v[1] = 1
v[2] = 0
v[3] = 0
v[4] = 1
print("check v3")
checkVertex(v)
print(mesh)
def drawMesh():
global mesh, mesh_idx, use_arrays
    if use_arrays:
positions = mesh.field("position")
texcoords = mesh.field("texcoord")
stride = mesh.elementSize
gl.EnableClientState(gl.VERTEX_ARRAY)
gl.ClientActiveTexture(gl.TEXTURE0)
gl.EnableClientState(gl.TEXTURE_COORD_ARRAY)
gl.TexCoordPointer(2, gl.FLOAT, stride, texcoords.rawPtr)
gl.VertexPointer(3, gl.FLOAT, stride, positions.rawPtr)
#gl.DrawArrays(gl.QUADS, 0, 4)
gl.DrawElements(gl.QUADS, 4, gl.UNSIGNED_SHORT, mesh_idx.rawPtr)
gl.DisableClientState(gl.TEXTURE_COORD_ARRAY)
gl.DisableClientState(gl.VERTEX_ARRAY)
else:
gl.Begin(gl.QUADS)
for e in mesh:
            gl.MultiTexCoord2f(gl.TEXTURE0, e.texcoord.s, e.texcoord.t)
gl.Color3f(1, 1, 1)
gl.Vertex3fv(e.position)
gl.End()
def initShaders():
global prog
vprog_src = [
"void main() {\n",
" gl_TexCoord[0] = gl_MultiTexCoord0;\n",
" gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;\n",
"}\n"
]
fprog_src = [
"uniform float time;\n",
"void main() {\n",
" gl_FragColor = time * vec4(gl_TexCoord[0].s, gl_TexCoord[0].t, 1.0, 0.0);\n",
"}\n"
]
prog = gl.CreateProgram()
vprog = gl.CreateShader(gl.VERTEX_SHADER)
gl.ShaderSource(vprog, vprog_src)
gl.CompileShader(vprog)
print("Compile vertex shader: %s" % gl.GetShaderInfoLog(vprog))
fprog = gl.CreateShader(gl.FRAGMENT_SHADER)
gl.ShaderSource(fprog, fprog_src)
gl.CompileShader(fprog)
print("Compile fragment shader: %s" % gl.GetShaderInfoLog(fprog))
gl.AttachShader(prog, vprog)
gl.AttachShader(prog, fprog)
gl.LinkProgram(prog)
print("Link: %s" % gl.GetProgramInfoLog(prog))
print("Vertex shader source: %s" % gl.GetShaderSource(vprog))
print("Fragment shader source: %s" % gl.GetShaderSource(fprog))
# callbacks
def display():
global prog, time_param
gl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)
gl.PushMatrix()
gl.UseProgram(prog)
loc = gl.GetUniformLocation(prog, "time")
gl.Uniform1f(loc, time_param)
drawMesh()
gl.UseProgram(0)
gl.PopMatrix()
glut.SwapBuffers()
def reshape(w, h):
print("reshape %dx%d" % (w, h))
gl.Viewport(0, 0, w, h)
gl.MatrixMode(gl.PROJECTION)
gl.LoadIdentity()
gl.Ortho(0, 1, 0, 1, -1, 1)
gl.MatrixMode(gl.MODELVIEW)
gl.LoadIdentity()
glut.PostRedisplay()
def keyboard(key, x, y):
global use_arrays
    if key == 27:
        print("Quit")
        glut.Exit(0)
    elif key == 97: # 'a'
use_arrays = not use_arrays
glut.PostRedisplay()
else:
print("Key: %s" % key)
def menu(val):
print("MenuEntry %s selected" % val)
if val == 1:
print("Quit")
glut.Exit(0)
def submenu(val):
print("SubMenuEntry %s selected: %s" % (val, ["Hello", "Goodbye"][val]))
def fade():
global elapsed_time, step, time_param
time_param = elapsed_time * 0.01
elapsed_time = elapsed_time + step
if elapsed_time >= 100:
step = -1
elif elapsed_time == 0:
step = 1
glut.PostRedisplay()
#glut.TimerFunc(1, fade, 0)
def printMatrix(m):
s = ""
# line
for i in xrange(4):
s = s + "{ ";
# col
for j in xrange(4):
s = s + "%s " % m[i+j*4]
s = s + "}\n"
print(s)
def initGL():
gl.Init()
print("OpenGL version: %s" % gl.version)
gl.ClearColor(0, 0, 0, 1)
gl.ClearDepth(1)
gl.DepthFunc(gl.LESS)
gl.ShadeModel(gl.SMOOTH)
gl.Enable(gl.LIGHTING)
gl.Enable(gl.LIGHT0)
gl.Enable(gl.DEPTH_TEST)
gl.Enable(gl.CULL_FACE)
gl.FrontFace(gl.CCW)
initMesh()
initShaders()
glut.Init()
glut.InitWindowPosition(50, 50)
glut.InitWindowSize(640, 480)
dm = glut.RGBA|glut.DEPTH|glut.DOUBLE
glut.InitDisplayMode(dm)
glut.CreateWindow("PyGLUT")
initGL()
glut.DisplayFunc(display)
glut.ReshapeFunc(reshape)
glut.KeyboardFunc(keyboard)
#glut.TimerFunc(1, fade, 0)
glut.IdleFunc(fade)
smid = glut.CreateMenu(submenu)
glut.AddMenuEntry("Hello", 0)
glut.AddMenuEntry("Goodbye", 1)
mid = glut.CreateMenu(menu)
glut.AddSubMenu("Greetings", smid)
glut.AddMenuEntry("Quit", 1)
glut.AttachMenu(glut.RIGHT_BUTTON)
glut.MainLoop()
| gatgui/pygl | python/test_gl.py | Python | lgpl-3.0 | 6,380 |
#!/usr/bin/python
###############################################################################
#
#
# Project: ECOOP, sponsored by The National Science Foundation
# Purpose: this code is part of the Cyberinfrastructure developed for the ECOOP project
# http://tw.rpi.edu/web/project/ECOOP
# from the TWC - Tetherless World Constellation
# at RPI - Rensselaer Polytechnic Institute
# funded by NSF
#
# Author: Massimo Di Stefano , distem@rpi.edu -
# http://tw.rpi.edu/web/person/MassimoDiStefano
#
###############################################################################
# Copyright (c) 2008-2014 Tetherless World Constellation at Rensselaer Polytechnic Institute
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from zipfile import ZipFile, ZIP_DEFLATED
from contextlib import closing
import paramiko
import qrcode
from IPython.core.display import HTML, Image
from IPython.display import display, Javascript
import envoy
from datetime import datetime
class shareUtil():
def zipdir(self, basedir, archivename, rm='no'):
"""
utility function to zip a single file or a directory
usage : zipdir(input, output)
@param basedir: input file or directory
@param archivename: output file.zip
@param rm: [yes, no], remove source file (optional, default=no)
"""
assert os.path.isdir(basedir)
with closing(ZipFile(archivename, "w", ZIP_DEFLATED)) as z:
for root, dirs, files in os.walk(basedir):
#NOTE: ignore empty directories
for fn in files:
#print fn
absfn = os.path.join(root, fn)
zfn = absfn[len(basedir) + len(os.sep):] #XXX: relative path
z.write(absfn, zfn)
if rm != 'no':
instruction = 'rm -rf %s' % basedir
os.system(instruction)
def uploadfile(self, username='epi', password='epi', hostname='localhost', port=22,
inputfile=None, outputfile=None, link=False, apacheroot='/var/www/', zip=False, qr=False):
'''
utility to upload file on remote server using sftp protocol
usage : uploadfile(inputfile, outputfile)
@rtype : str
@param username: str - username on remote server
@param password: str - password to access remote server
@param hostname: str - hostname of remote server (default: localhost)
@param port: port number on remote server (default: 22)
@param inputfile: str - local path to the file to uploaded
@param outputfile: remote path to the file to upload
        @param link: boolean [True, False], default False; print a link to download the file
        (the remote path needs to be in a web-available directory)
        @param apacheroot: path to the apache root, default '/var/www/'; required if link == True
        @param zip: boolean, default False; zip the output
        @param qr: boolean, default False; return the qrcode as an image
        @return: link to the uploaded file if link=True, or qr image if qr=True & link=True; None if link is False
'''
if zip:
#print 'add zipfile'
zipfile = str(inputfile + '.zip')
self.zipdir(inputfile, zipfile)
inputfile = zipfile
#paramiko.util.log_to_file('/var/www/esr/paramiko.log')
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(hostname, username=username, password=password)
transport = paramiko.Transport((hostname, port))
transport.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(transport)
parts = outputfile.split('/')
for n in range(2, len(parts)):
path = '/'.join(parts[:n])
#print 'Path:', path,
sys.stdout.flush()
try:
s = sftp.stat(path)
#print 'mode =', oct(s.st_mode)
except IOError as e:
#print e
#print 'adding dir: ', path
sftp.mkdir(path)
try:
sftp.put(remotepath=outputfile, localpath=inputfile)
sftp.close()
transport.close()
print 'file uploaded'
if qr:
if link:
pass
if not link:
                    print 'WARNING: qrcode not generated, set the option link to True'
if link:
filelink = outputfile.replace(apacheroot, '')
link = 'http://' + os.path.normpath(hostname + '/' + filelink)
raw_html = '<a href="%s" target="_blank">ESR results</a>' % link
print 'results are now available for download at : ', link
image = None
if qr:
imagefile = parts[-1].split('.')[0] + '.jpeg'
qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)
qr.add_data(link)
qr.make(fit=True)
img = qr.make_image()
img.save(imagefile, "JPEG")
print 'alive'
image = Image(imagefile)
return image
if not qr:
return HTML(raw_html)
except IOError:
print "Error: can\'t find file or read data check if input file exist and or remote location is writable"
def gistit(self, filename, jist='/usr/local/bin/jist', type='notebook'):
'''
use the jist utility to paste a txt file on github as gist and return a link to it
usage : gistit(notebookfile)
@param filename: str - path to the a text file or notebook file (.json)
@param jist: str - path to the executable jist (default=/usr/local/bin/jist)
@param type: str - notebook, text
@return: return a link to gist if type=text, link to nbviewer if type=notebook
'''
try:
with open(filename):
link = None
jist = self.which(jist)
if jist:
try:
r = envoy.run('%s -p %s' % (jist, filename))
if type == 'notebook':
link = r.std_out.replace('\n', '').replace('https://gist.github.com',
'http://nbviewer.ipython.org')
if type == 'text':
link = r.std_out.replace('\n', '')
return link
except:
print "can't generate gist, check if jist works bycommand line with: jist -p filename"
if not jist:
print 'cannot find jist utility, check if it is in your path'
except IOError:
print 'input file %s not found' % filename
def get_id(self, suffix, makedir=True):
'''
generate a directory based on the suffix and a time stamp
output looks like : suffix_Thursday_26_September_2013_06_28_49_PM
usage: getID(suffix)
@param suffix: str - suffix for the directory to be generated,
@return: str - directory name
'''
        ID = suffix + '_' + datetime.utcnow().strftime("%A_%d_%B_%Y_%I_%M_%S_%p")
if makedir:
self.ensure_dir(ID)
        print 'session data directory ID:', ID
return ID
def ensure_dir(self, dir):
'''
make a directory on the file system if it does not exist
usage: ensure_dir(dir)
@param dir: str - path to a directory existent on the local filesystem
@return: None
'''
if not os.path.exists(dir):
os.makedirs(dir)
def save_notebook(self, ID, notebookname, web=None, notebookdir=None):
"""
Save the notebook file as html and or as gist
@param ID: directory name where to store the saved notebook
@param notebookname: name of the notebook
@param web:
@param notebookdir:
"""
if not notebookdir:
notebookdir = os.getcwd()
display(Javascript("IPython.notebook.save_notebook()"))
notebookfile = os.path.join(notebookdir, notebookname)
savedir = os.path.join(os.getcwd(), ID)
command1 = 'cp %s %s' % (notebookfile, savedir)
newnotebook = os.path.join(savedir, notebookname)
command2 = 'ipython nbconvert %s' % newnotebook
os.system(command1)
os.system(command2)
if web:
try:
self.gistit(notebookfile)
except IOError:
print "can't genrate a gist"
def which(self, program):
"""
Check if a program exist and return the full path
@param program: executable name or path to executable
@return: full path to executable
"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def getTime(self):
now = datetime.now()
return now | epifanio/ecoop | ecooputil.py | Python | lgpl-3.0 | 10,832 |
# pyresample, Resampling of remote sensing image data in python
#
# Copyright (C) 2012, 2014, 2015 Esben S. Nielsen
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
# workaround python bug: http://bugs.python.org/issue15881#msg170215
import multiprocessing
from setuptools import setup
import sys
import imp
version = imp.load_source('pyresample.version', 'pyresample/version.py')
requirements = ['pyproj', 'numpy', 'configobj']
extras_require = {'pykdtree': ['pykdtree'],
'numexpr': ['numexpr'],
'quicklook': ['matplotlib', 'basemap']}
if sys.version_info < (2, 6):
# multiprocessing is not in the standard library
requirements.append('multiprocessing')
setup(name='pyresample',
version=version.__version__,
description='Resampling of remote sensing data in Python',
author='Thomas Lavergne',
author_email='t.lavergne@met.no',
package_dir={'pyresample': 'pyresample'},
packages=['pyresample'],
install_requires=requirements,
extras_require=extras_require,
test_suite='pyresample.test.suite',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering'
]
)
| jhamman/pyresample | setup.py | Python | lgpl-3.0 | 2,103 |
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
import ifcopenshell
import ifcopenshell.api
class Usecase:
def __init__(self, file, **settings):
self.file = file
self.settings = {
"relating_process": None,
"related_object": None,
}
for key, value in settings.items():
self.settings[key] = value
def execute(self):
if self.settings["related_object"].HasAssignments:
for assignment in self.settings["related_object"].HasAssignments:
if (
assignment.is_a("IfclRelAssignsToProcess")
and assignment.RelatingProcess == self.settings["relating_process"]
):
return
operates_on = None
if self.settings["relating_process"].OperatesOn:
operates_on = self.settings["relating_process"].OperatesOn[0]
if operates_on:
related_objects = list(operates_on.RelatedObjects)
related_objects.append(self.settings["related_object"])
operates_on.RelatedObjects = related_objects
ifcopenshell.api.run("owner.update_owner_history", self.file, **{"element": operates_on})
else:
operates_on = self.file.create_entity(
"IfcRelAssignsToProcess",
**{
"GlobalId": ifcopenshell.guid.new(),
"OwnerHistory": ifcopenshell.api.run("owner.create_owner_history", self.file),
"RelatedObjects": [self.settings["related_object"]],
"RelatingProcess": self.settings["relating_process"],
}
)
return operates_on
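# Illustrative call through the generic ifcopenshell.api entry point (a
# sketch; `model`, `task` and `product` are hypothetical handles):
#     ifcopenshell.api.run("sequence.assign_process", model,
#                          relating_process=task, related_object=product)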
| IfcOpenShell/IfcOpenShell | src/ifcopenshell-python/ifcopenshell/api/sequence/assign_process.py | Python | lgpl-3.0 | 2,464 |
'''
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
'''
# Building a Standard Primal Linear Programming Problem
# in Python/Gurobi[gurobipy]
#
'''
Adapted from:
Daskin, M. S.
1995
Network and Discrete Location: Models, Algorithms, and Applications
Hoboken, NJ, USA: John Wiley & Sons, Inc.
'''
# Imports
import numpy as np
import gurobipy as gbp
import datetime as dt
def GbpStPrimLP():
# Constants
Aij = np.random.randint(5, 50, 25)
Aij = Aij.reshape(5,5)
AijSum = np.sum(Aij)
Cj = np.random.randint(10, 20, 5)
CjSum = np.sum(Cj)
Bi = np.random.randint(10, 20, 5)
BiSum = np.sum(Bi)
# Matrix Shape
rows = range(len(Aij))
cols = range(len(Aij[0]))
# Instantiate Model
mPrimal_Standard_GUROBI = gbp.Model(' -- Standard Primal Linear Programming Problem -- ')
# Set Focus to Optimality
gbp.setParam('MIPFocus', 2)
# Decision Variables
desc_var = []
for dest in cols:
desc_var.append([])
desc_var[dest].append(mPrimal_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='y'+str(dest+1)))
# Surplus Variables
surp_var = []
for orig in rows:
surp_var.append([])
surp_var[orig].append(mPrimal_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='s'+str(orig+1)))
# Update Model
mPrimal_Standard_GUROBI.update()
#Objective Function
mPrimal_Standard_GUROBI.setObjective(gbp.quicksum(Cj[dest]*desc_var[dest][0]
for dest in cols),
gbp.GRB.MINIMIZE)
# Constraints
for orig in rows:
mPrimal_Standard_GUROBI.addConstr(gbp.quicksum(Aij[orig][dest]*desc_var[dest][0]
for dest in cols)
- surp_var[orig][0]
- Bi[orig] == 0)
# Optimize
try:
mPrimal_Standard_GUROBI.optimize()
except Exception as e:
print ' ################################################################'
print ' < ISSUE : ', e, ' >'
print ' ################################################################'
# Write LP file
mPrimal_Standard_GUROBI.write('LP.lp')
print '\n*************************************************************************'
print ' | Decision Variables'
for v in mPrimal_Standard_GUROBI.getVars():
print ' | ', v.VarName, '=', v.x
print '*************************************************************************'
val = mPrimal_Standard_GUROBI.objVal
print ' | Objective Value ------------------ ', val
print ' | Aij Sum -------------------------- ', AijSum
print ' | Cj Sum --------------------------- ', CjSum
print ' | Bi Sum --------------------------- ', BiSum
print ' | Matrix Dimensions ---------------- ', Aij.shape
print ' | Date/Time ------------------------ ', dt.datetime.now()
print '*************************************************************************'
print '-- Gurobi Standard Primal Linear Programming Problem --'
try:
GbpStPrimLP()
print '\nJames Gaboardi, 2015'
except Exception as e:
print ' ################################################################'
print ' < ISSUE : ', e, ' >'
print ' ################################################################' | jGaboardi/LP_MIP | Gurobi_Primal_Standard.py | Python | lgpl-3.0 | 3,759 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# === django_pgmp ---------------------------------------------------------===
# This file is part of django-pgmp. django-pgmp is copyright © 2012, RokuSigma
# Inc. and contributors. See AUTHORS and LICENSE for more details.
#
# django-pgmp is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# django-pgmp is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with django-pgmp. If not, see <http://www.gnu.org/licenses/>.
# ===----------------------------------------------------------------------===
VERSION = (0,0,4, 'alpha', 0)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
        version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = "%s%s" % (version, VERSION[3])
if VERSION[4] != 0:
version = '%s%s' % (version, VERSION[4])
return version
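# Worked example (a sketch): with VERSION = (0, 0, 4, 'alpha', 0) as above,
# get_version() returns '0.0.4 pre-alpha'; with (0, 0, 4, 'beta', 1) it would
# return '0.0.4beta1'.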
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
| monetizeio/django-pgmp | django_pgmp/__init__.py | Python | lgpl-3.0 | 1,587 |
#from satpy import Scene
from satpy.utils import debug_on
debug_on()
#from glob import glob
#base_dir="/data/COALITION2/database/meteosat/radiance_HRIT/case-studies/2015/07/07/"
#import os
#os.chdir(base_dir)
#filenames = glob("*201507071200*__")
#print base_dir
#print filenames
##global_scene = Scene(reader="hrit_msg", filenames=filenames, base_dir=base_dir, ppp_config_dir="/opt/users/hau/PyTroll//cfg_offline/")
#global_scene = Scene(reader="hrit_msg", filenames=filenames, base_dir=base_dir, ppp_config_dir="/opt/users/hau/PyTroll/packages/satpy/satpy/etc")
#from satpy import available_readers
#available_readers()
# new version of satpy after 0.8
#################################
from satpy import find_files_and_readers, Scene
from datetime import datetime
import numpy as np
show_details=False
save_overview=True
files_sat = find_files_and_readers(sensor='seviri',
start_time=datetime(2015, 7, 7, 12, 0),
end_time=datetime(2015, 7, 7, 12, 0),
base_dir="/data/COALITION2/database/meteosat/radiance_HRIT/case-studies/2015/07/07/",
reader="seviri_l1b_hrit")
#print files_sat
#files = dict(files_sat.items() + files_nwc.items())
files = dict(files_sat.items())
global_scene = Scene(filenames=files) # not allowed any more: reader="hrit_msg",
print dir(global_scene)
#global_scene.load([0.6, 0.8, 10.8])
#global_scene.load(['IR_120', 'IR_134'])
if save_overview:
global_scene.load(['overview',0.6, 0.8])
else:
global_scene.load([0.6,0.8])
#print(global_scene[0.6]) # works only if you load also the 0.6 channel, but not an RGB that contains the 0.6
#!!# print(global_scene['overview']) ### this one only works in the develop version
global_scene.available_dataset_names()
global_scene["ndvi"] = (global_scene[0.8] - global_scene[0.6]) / (global_scene[0.8] + global_scene[0.6])
# !!! BUG: will not be resampled in global_scene.resample(area)
#from satpy import DatasetID
#my_channel_id = DatasetID(name='IR_016', calibration='radiance')
#global_scene.load([my_channel_id])
#print(scn['IR_016'])
#area="eurol"
#area="EuropeCanaryS95"
area="ccs4"
local_scene = global_scene.resample(area)
if show_details:
help(local_scene)
print global_scene.available_composite_ids()
print global_scene.available_composite_names()
print global_scene.available_dataset_names()
print global_scene.available_writers()
if save_overview:
#local_scene.show('overview')
local_scene.save_dataset('overview', './overview_'+area+'.png', overlay={'coast_dir': '/data/OWARNA/hau/maps_pytroll/', 'color': (255, 255, 255), 'resolution': 'i'})
print 'display ./overview_'+area+'.png &'
local_scene["ndvi"] = (local_scene[0.8] - local_scene[0.6]) / (local_scene[0.8] + local_scene[0.6])
#local_scene["ndvi"].area = local_scene[0.8].area
print "local_scene[\"ndvi\"].min()", local_scene["ndvi"].compute().min()
print "local_scene[\"ndvi\"].max()", local_scene["ndvi"].compute().max()
lsmask_file="/data/COALITION2/database/LandSeaMask/SEVIRI/LandSeaMask_"+area+".nc"
from netCDF4 import Dataset
ncfile = Dataset(lsmask_file,'r')
# Read variable corresponding to channel name
lsmask = ncfile.variables['lsmask'][:,:] # attention [:,:] or [:] is really necessary
import dask.array as da
#print 'type(local_scene["ndvi"].data)', type(local_scene["ndvi"].data), local_scene["ndvi"].data.compute().shape
#print "type(lsmask)", type(lsmask), lsmask.shape, lsmask[:,:,0].shape,
#local_scene["ndvi"].data.compute()[lsmask[:,:,0]==0]=np.nan
ndvi_numpyarray=local_scene["ndvi"].data.compute()
if area=="EuropeCanaryS95":
ndvi_numpyarray[lsmask[::-1,:,0]==0]=np.nan
else:
ndvi_numpyarray[lsmask[:,:,0]==0]=np.nan
local_scene["ndvi"].data = da.from_array(ndvi_numpyarray, chunks='auto')
#local_scene["ndvi"].data = local_scene["ndvi"].data.where(lsmask!=0)
colorized=True
if not colorized:
#local_scene.save_dataset('ndvi', './ndvi_'+area+'.png')
local_scene.save_dataset('ndvi', './ndvi_'+area+'.png', overlay={'coast_dir': '/data/OWARNA/hau/maps_pytroll/', 'color': (255, 255, 255), 'resolution': 'i'})
#print dir(local_scene.save_dataset)
else:
# https://github.com/pytroll/satpy/issues/459
# from satpy.enhancements import colorize
# colorize(img, **kwargs)
# 'ylgn'
# https://satpy.readthedocs.io/en/latest/writers.html
# nice NDVI colourbar here:
# https://www.researchgate.net/figure/NDVI-maps-Vegetation-maps-created-by-measuring-the-Normalized-Vegetation-Difference_fig7_323885082
from satpy.composites import BWCompositor
from satpy.enhancements import colorize
from satpy.writers import to_image
compositor = BWCompositor("test", standard_name="ndvi")
composite = compositor((local_scene["ndvi"], ))
img = to_image(composite)
#from trollimage import colormap
#dir(colormap)
# 'accent', 'blues', 'brbg', 'bugn', 'bupu', 'colorbar', 'colorize', 'dark2', 'diverging_colormaps', 'gnbu', 'greens',
# 'greys', 'hcl2rgb', 'np', 'oranges', 'orrd', 'paired', 'palettebar', 'palettize', 'pastel1', 'pastel2', 'piyg', 'prgn',
# 'pubu', 'pubugn', 'puor', 'purd', 'purples', 'qualitative_colormaps', 'rainbow', 'rdbu', 'rdgy', 'rdpu', 'rdylbu', 'rdylgn',
# 'reds', 'rgb2hcl', 'sequential_colormaps', 'set1', 'set2', 'set3', 'spectral', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'
# kwargs = {"palettes": [{"colors": 'ylgn',
# "min_value": -0.1, "max_value": 0.9}]}
#arr = np.array([[230, 227, 227], [191, 184, 162], [118, 148, 61], [67, 105, 66], [5, 55, 8]])
arr = np.array([ [ 95, 75, 49], [210, 175, 131], [118, 148, 61], [67, 105, 66], [28, 29, 4]])
np.save("/tmp/binary_colormap.npy", arr)
kwargs = {"palettes": [{"filename": "/tmp/binary_colormap.npy",
"min_value": -0.1, "max_value": 0.8}]}
colorize(img, **kwargs)
from satpy.writers import add_decorate, add_overlay
decorate = {
'decorate': [
{'logo': {'logo_path': '/opt/users/common/logos/meteoSwiss.png', 'height': 60, 'bg': 'white','bg_opacity': 255, 'align': {'top_bottom': 'top', 'left_right': 'right'}}},
{'text': {'txt': ' MSG, '+local_scene.start_time.strftime('%Y-%m-%d %H:%MUTC')+', '+ area+', NDVI',
'align': {'top_bottom': 'top', 'left_right': 'left'},
'font': "/usr/openv/java/jre/lib/fonts/LucidaTypewriterBold.ttf",
'font_size': 19,
'height': 25,
'bg': 'white',
'bg_opacity': 0,
'line': 'white'}}
]
}
img = add_decorate(img, **decorate) #, fill_value='black'
img = add_overlay(img, area, '/data/OWARNA/hau/maps_pytroll/', color='red', width=0.5, resolution='i', level_coast=1, level_borders=1, fill_value=None)
#from satpy.writers import compute_writer_results
#res1 = scn.save_datasets(filename="/tmp/{name}.png",
# writer='simple_image',
# compute=False)
#res2 = scn.save_datasets(filename="/tmp/{name}.tif",
# writer='geotiff',
# compute=False)
#results = [res1, res2]
#compute_writer_results(results)
#img.show()
img.save('./ndvi_'+area+'.png')
print 'display ./ndvi_'+area+'.png &'
| meteoswiss-mdr/monti-pytroll | scripts/demo_satpy_ndvi_decorate.py | Python | lgpl-3.0 | 7,498 |
'''Copyright (C) 2015 by Wesley Tansey
This file is part of the GFL library.
The GFL library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The GFL library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with the GFL library. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
from numpy.ctypeslib import ndpointer
from ctypes import *
from pygfl.utils import *
'''Load the graph fused lasso library'''
try:
graphfl_lib = cdll.LoadLibrary("libgraphfl.so")
except OSError:
_libgraphfl_file = get_libgraphfl()
graphfl_lib = cdll.LoadLibrary(_libgraphfl_file)
graphfl = graphfl_lib.graph_fused_lasso_warm
graphfl.restype = c_int
graphfl.argtypes = [c_int, ndpointer(c_double, flags='C_CONTIGUOUS'),
c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'),
c_double, c_double, c_double, c_int, c_double,
ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS')]
weighted_graphfl = graphfl_lib.graph_fused_lasso_weight_warm
weighted_graphfl.restype = c_int
weighted_graphfl.argtypes = [c_int, ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'),
c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'),
c_double, c_double, c_double, c_int, c_double,
ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS')]
graphfl_lams = graphfl_lib.graph_fused_lasso_lams_warm
graphfl_lams.restype = c_int
graphfl_lams.argtypes = [c_int, ndpointer(c_double, flags='C_CONTIGUOUS'),
c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'),
ndpointer(c_double, flags='C_CONTIGUOUS'), c_double, c_double, c_int, c_double,
ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS')]
weighted_graphfl_lams = graphfl_lib.graph_fused_lasso_lams_weight_warm
weighted_graphfl_lams.restype = c_int
weighted_graphfl_lams.argtypes = [c_int, ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'),
c_int, ndpointer(c_int, flags='C_CONTIGUOUS'), ndpointer(c_int, flags='C_CONTIGUOUS'),
ndpointer(c_double, flags='C_CONTIGUOUS'), c_double, c_double, c_int, c_double,
ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS'), ndpointer(c_double, flags='C_CONTIGUOUS')]
class TrailSolver:
def __init__(self, alpha=2., inflate=2., maxsteps=100000, converge=1e-6, penalty='gfl', max_dp_steps=5000, gamma=1.):
self.alpha = alpha
self.inflate = inflate
self.maxsteps = maxsteps
self.converge = converge
self.penalty = penalty
self.max_dp_steps = max_dp_steps
self.gamma = gamma
def set_data(self, y, edges, ntrails, trails, breakpoints, weights=None):
self.y = y
self.edges = edges if type(edges) is defaultdict else edge_map_from_edge_list(edges)
self.nnodes = len(y)
self.ntrails = ntrails
self.trails = trails
self.breakpoints = breakpoints
self.weights = weights
self.beta = np.zeros(self.nnodes, dtype='double')
self.z = np.zeros(self.breakpoints[-1], dtype='double')
self.u = np.zeros(self.breakpoints[-1], dtype='double')
self.steps = []
def set_values_only(self, y, weights=None):
self.y = y
self.weights = weights
def solve(self, lam):
'''Solves the GFL for a fixed value of lambda.'''
if self.penalty == 'dp':
return self.solve_dp(lam)
if self.penalty == 'gfl':
return self.solve_gfl(lam)
        if self.penalty == 'gamlasso':
            return self.solve_gamlasso(lam)
raise Exception('Unknown penalty type: {0}'.format(self.penalty))
def solve_gfl(self, lam):
if hasattr(lam, '__len__'):
if self.weights is None:
s = graphfl_lams(self.nnodes, self.y,
self.ntrails, self.trails, self.breakpoints,
lam,
self.alpha, self.inflate, self.maxsteps, self.converge,
self.beta, self.z, self.u)
else:
s = weighted_graphfl_lams(self.nnodes, self.y, self.weights,
self.ntrails, self.trails, self.breakpoints,
lam,
self.alpha, self.inflate, self.maxsteps, self.converge,
self.beta, self.z, self.u)
else:
if self.weights is None:
s = graphfl(self.nnodes, self.y,
self.ntrails, self.trails, self.breakpoints,
lam,
self.alpha, self.inflate, self.maxsteps, self.converge,
self.beta, self.z, self.u)
else:
s = weighted_graphfl(self.nnodes, self.y, self.weights,
self.ntrails, self.trails, self.breakpoints,
lam,
self.alpha, self.inflate, self.maxsteps, self.converge,
self.beta, self.z, self.u)
self.steps.append(s)
return self.beta
def solve_dp(self, lam):
'''Solves the Graph-fused double Pareto (non-convex, local optima only)'''
cur_converge = self.converge+1
step = 0
# Get an initial estimate using the GFL
self.solve_gfl(lam)
beta2 = np.copy(self.beta)
while cur_converge > self.converge and step < self.max_dp_steps:
# Weight each edge differently
u = lam / (1 + np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
# Swap the beta buffers
temp = self.beta
self.beta = beta2
beta2 = temp
# Solve the edge-weighted GFL problem, which updates beta
self.solve_gfl(u)
# Check for convergence
cur_converge = np.sqrt(((self.beta - beta2)**2).sum())
step += 1
self.steps.append(step)
return self.beta
def solve_gamlasso(self, lam):
'''Solves the Graph-fused gamma lasso via POSE (Taddy, 2013)'''
        weights = lam / (1 + self.gamma * np.abs(self.beta[self.trails[::2]] - self.beta[self.trails[1::2]]))
        s = self.solve_gfl(weights)
self.steps.append(s)
return self.beta
def log_likelihood(self, beta):
return -0.5 * ((self.y - beta)**2).sum()
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
'''Follows the solution path to find the best lambda value.'''
lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
aic_trace = np.zeros(lambda_grid.shape) # The AIC score for each lambda value
aicc_trace = np.zeros(lambda_grid.shape) # The AICc score for each lambda value (correcting for finite sample size)
bic_trace = np.zeros(lambda_grid.shape) # The BIC score for each lambda value
dof_trace = np.zeros(lambda_grid.shape) # The degrees of freedom of each final solution
log_likelihood_trace = np.zeros(lambda_grid.shape)
beta_trace = []
best_idx = None
best_plateaus = None
# Solve the series of lambda values with warm starts at each point
for i, lam in enumerate(lambda_grid):
if verbose:
print('#{0} Lambda = {1}'.format(i, lam))
# Fit to the final values
beta = self.solve(lam)
if verbose:
print('Calculating degrees of freedom')
# Count the number of free parameters in the grid (dof)
plateaus = calc_plateaus(beta, self.edges)
dof_trace[i] = len(plateaus)
if verbose:
print('Calculating AIC')
# Get the negative log-likelihood
log_likelihood_trace[i] = self.log_likelihood(beta)
# Calculate AIC = 2k - 2ln(L)
aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
# Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i]+1) / (len(beta) - dof_trace[i] - 1.)
# Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))
# Track the best model thus far
if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
best_idx = i
best_plateaus = plateaus
# Save the trace of all the resulting parameters
beta_trace.append(np.array(beta))
if verbose:
print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
if verbose:
print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
return {'aic': aic_trace,
'aicc': aicc_trace,
'bic': bic_trace,
'dof': dof_trace,
'loglikelihood': log_likelihood_trace,
'beta': np.array(beta_trace),
'lambda': lambda_grid,
'best_idx': best_idx,
'best': beta_trace[best_idx],
'plateaus': best_plateaus}
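# Illustrative usage (a sketch; `y`, `edges` and the trail decomposition are
# hypothetical inputs, typically produced by the pygfl trail utilities):
#     solver = TrailSolver()
#     solver.set_data(y, edges, ntrails, trails, breakpoints)
#     beta = solver.solve(0.5)                    # fit a single lambda
#     path = solver.solution_path(0.1, 10., 20)   # BIC-driven model search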
| tansey/gfl | pygfl/solver.py | Python | lgpl-3.0 | 10,478 |
import random, itertools, operator, types, pprint, contextlib, collections
import textwrap, string, pdb, copy, abc, functools
memoiziraj = functools.lru_cache(maxsize=None)
def djeljiv(m, n):
"""Je li m djeljiv s n?"""
return not m % n
def ispiši(automat):
"""Relativno uredan ispis (konačnog ili potisnog) automata."""
pprint.pprint(automat.komponente)
def Kartezijev_produkt(*skupovi):
"""Skup uređenih n-torki."""
return set(itertools.product(*skupovi))
def funkcija(f, domena, kodomena):
"""Je li f:domena->kodomena?"""
return f.keys() == domena and set(f.values()) <= kodomena
class fset(set):
"""Ponaša se kao frozenset, ispisuje se kao set."""
def __repr__(self):
return repr(set(self)) if self else '∅'
def __or__(self, other):
return fset(set(self) | set(other))
def __and__(self, other):
return fset(set(self) & set(other))
def __sub__(self, other):
return fset(set(self) - set(other))
def __xor__(self, other):
return fset(set(self) ^ set(other))
__ror__, __rand__, __rsub__, __rxor__ = __or__, __and__, __sub__, __xor__
def __hash__(self):
return hash(frozenset(self))
def __iand__(self, other):
return NotImplemented
def __ior__(self, other):
return NotImplemented
def __isub__(self, other):
return NotImplemented
def __ixor__(self, other):
return NotImplemented
def add(self, value):
raise TypeError('fset is immutable')
def clear(self):
raise TypeError('fset is immutable')
def difference_update(self, other):
raise TypeError('fset is immutable')
def intersection_update(self, other):
raise TypeError('fset is immutable')
def discard(self, value):
raise TypeError('fset is immutable')
def pop(self):
raise TypeError('fset is immutable')
def remove(self, value):
raise TypeError('fset is immutable')
def symmetric_difference_update(self, other):
raise TypeError('fset is immutable')
def update(self, other):
raise TypeError('fset is immutable')
def difference(self, other):
return self - other
def intersection(self, other):
return self & other
def symmetric_difference(self, other):
return self ^ other
def union(self, other):
return self | other
def copy(self):
return self
def __dir__(self):
return dir(frozenset)
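# fset makes sets of states hashable (usable as dict keys and as members of
# other sets) while still printing like an ordinary set; all mutators raise.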
def partitivni_skup(skup):
"""Skup svih podskupova zadanog skupa."""
return {fset(itertools.compress(skup, χ))
for χ in itertools.product({False, True}, repeat=len(skup))}
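# For example, partitivni_skup({1, 2}) == {∅, {1}, {2}, {1, 2}}, with each
# subset represented as an fset.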
def relacija(R, *skupovi):
"""Je li R relacija među zadanim skupovima?"""
return R <= Kartezijev_produkt(*skupovi)
def sažmi(vrijednost):
"""Sažimanje 1-torki u njihove elemente. Ostale n-torke ne dira."""
with contextlib.suppress(TypeError, ValueError):
komponenta, = vrijednost
return komponenta
return vrijednost
def naniži(vrijednost):
"""Pretvaranje vrijednosti koja nije n-torka u 1-torku."""
return vrijednost if isinstance(vrijednost, tuple) else (vrijednost,)
def funkcija_iz_relacije(relacija, *domene):
"""Pretvara R⊆A×B×C×D×E (uz domene A, B) u f:A×B→℘(C×D×E)."""
m = len(domene)
funkcija = {sažmi(x): set() for x in Kartezijev_produkt(*domene)}
for n_torka in relacija:
assert len(n_torka) > m
for x_i, domena_i in zip(n_torka, domene):
assert x_i in domena_i
x, y = n_torka[:m], n_torka[m:]
if len(x) == 1: x, = x
if len(y) == 1: y, = y
funkcija[sažmi(x)].add(sažmi(y))
return funkcija
def relacija_iz_funkcije(funkcija):
"""Pretvara f:A×B→℘(C×D×E) u R⊆A×B×C×D×E."""
return {naniži(x) + naniži(y) for x, yi in funkcija.items() for y in yi}
def unija_familije(familija):
"""Unija familije skupova."""
return fset(x for skup in familija for x in skup)
def disjunktna_unija(*skupovi):
"""Unija skupova, osiguravajući da su u parovima disjunktni."""
for skup1, skup2 in itertools.combinations(skupovi, 2):
assert skup1.isdisjoint(skup2)
return set().union(*skupovi)
def ε_proširenje(Σ):
"""Σ∪{ε}"""
return disjunktna_unija(Σ, {ε})
def primijeni(pravilo, riječ, mjesto):
"""Primjenjuje gramatičko pravilo na zadanom mjestu (indeksu) u riječi."""
varijabla, *zamjena = pravilo
assert riječ[mjesto] == varijabla
rezultat = list(riječ[:mjesto]) + zamjena + list(riječ[mjesto+1:])
return ''.join(rezultat) if isinstance(riječ, str) else rezultat
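# For example, primijeni(('S', 'a', 'S', 'b'), 'S', 0) == 'aSb': the variable
# S at index 0 is replaced by the right-hand side a S b.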
class Kontraprimjer(Exception):
"""Jezik se ne slaže sa zadanom specifikacijom."""
def __init__(self, test, spec):
self.args = "Jezik {}sadrži {!r}".format('ne '*bool(spec), test),
class PrazanString(str):
"""Klasa koja određuje ponašanje objekta ε."""
def __add__(self, other):
return other
def __mul__(self, n):
return self
def __len__(self):
return 0
def __repr__(self):
return 'ε'
__radd__, __rmul__, __str__ = __add__, __mul__, __repr__
ε = PrazanString()
def parsiraj_tablicu_KA(tablica):
"""Parsiranje tabličnog zapisa konačnog automata (Sipser page 36).
Prvo stanje je početno, završna su označena znakom # na kraju reda."""
prva, *ostale = tablica.strip().splitlines()
znakovi = prva.split()
assert all(len(znak) == 1 for znak in znakovi)
abeceda = set(znakovi)
stanja, završna = set(), set()
prijelaz, početno = {}, None
for linija in ostale:
stanje, *dolazna = linija.split()
if početno is None: početno = stanje
extra = len(dolazna) - len(znakovi)
assert extra in {0, 1}
if extra == 1:
assert dolazna.pop() == '#'
završna.add(stanje)
for znak, dolazno in zip(znakovi, dolazna):
prijelaz[stanje, znak] = dolazno
stanja.add(stanje)
return stanja, abeceda, prijelaz, početno, završna
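# Illustrative table (a sketch): symbols in the header row, one state per row,
# # marking an accepting state:
#     0  1
#     q1 q1 q2
#     q2 q1 q2 #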
def parsiraj_tablicu_NKA(tablica):
"""Parsiranje tabličnog zapisa nedeterminističkog KA (Sipser page 54).
Prvo stanje je početno, završna su označena znakom # na kraju reda.
ε-prijelazi su nakon svih znak-prijelaza (stupac čije zaglavlje nema znaka).
Izostanak prijelaza označava se znakom / na odgovarajućem mjestu.
Višestruki prijelazi za isto stanje i znak razdvojeni su znakom /."""
prva, *ostale = tablica.strip().splitlines()
znakovi = prva.split()
assert all(len(znak) == 1 for znak in znakovi)
abeceda = set(znakovi)
stanja, završna = set(), set()
prijelaz, početno = set(), None
for linija in ostale:
stanje, *dolazna = linija.split()
if početno is None: početno = stanje
extra = len(dolazna) - len(znakovi)
assert extra >= 0
if extra > 0 and dolazna[~0] == '#':
del dolazna[~0]
završna.add(stanje)
for znak, dolazno in zip(znakovi, dolazna):
for dolazno1 in filter(None, dolazno.split('/')):
prijelaz.add((stanje, znak, dolazno1))
for dolazno in dolazna[len(znakovi):]:
for dolazno2 in dolazno.split('/'):
prijelaz.add((stanje, ε, dolazno2))
stanja.add(stanje)
return stanja, abeceda, prijelaz, početno, završna
def parsiraj_tablicu_PA(tablica):
"""Parsiranje tabličnog zapisa (relacije prijelaza) potisnog automata.
Svaki redak ima polazno stanje, čitani znak, pop znak, dolazno, push znak.
Prvo polazno stanje je početno, završna su označena znakom # na kraju reda.
ε se označava znakom /. Završno stanje iz kojeg ne izlazi strelica je #."""
stanja, abeceda, abeceda_stoga, prijelaz = set(), set(), set(), set()
početno, završna = None, set()
def dodaj(znak, skup):
if znak in {'/', 'ε'}:
return ε
skup.add(znak)
return znak
for linija in tablica.strip().splitlines():
trenutno_završno = False
ćelije = linija.split()
if len(ćelije) == 6:
assert ćelije.pop() == '#'
trenutno_završno = True
polazno, znak, stog_pop, dolazno, stog_push = ćelije
if početno is None:
početno = polazno
stanja |= {polazno, dolazno}
assert len(znak) == 1
znak = dodaj(znak, abeceda)
stog_pop = dodaj(stog_pop, abeceda_stoga)
stog_push = dodaj(stog_push, abeceda_stoga)
if trenutno_završno:
završna.add(polazno)
prijelaz.add((polazno, znak, stog_pop, dolazno, stog_push))
if '#' in stanja: završna.add('#')
return stanja, abeceda, abeceda_stoga, prijelaz, početno, završna
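# Illustrative row (a sketch): "q0 a / q1 A" reads a in state q0, pops
# nothing, pushes A and moves to q1; a trailing " #" would mark q0 accepting.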
def parsiraj_tablicu_TS(tablica):
"""Parsiranje tabličnog zapisa (funkcije prijelaza) Turingovog stroja.
Prvo stanje je početno, . prihvaća, ! odbija (beskonačna petlja udesno).
Prijelazi: znak+stanje ili znak-stanje. Ako nešto ne piše, ostaje isto.
Praznina je _, i njena pozicija označava kraj ulazne abecede."""
prva, *ostale = tablica.strip().splitlines()
znakovi = prva.split()
assert all(len(znak) == 1 for znak in znakovi) and '_' in znakovi
radna_abeceda = set(znakovi)
abeceda = set(znakovi[:znakovi.index('_')])
stanja, prijelaz, početno = {'.'}, {}, None
for linija in ostale:
stanje, *prijelazi = linija.split()
if početno is None: početno = stanje
assert len(prijelazi) == len(znakovi)
for znak, trojka in zip(znakovi, prijelazi):
if trojka in set('.!'): nstanje, nznak, smjer = trojka, znak, ''
else:
for smjer in '+-':
nznak, smjer, nstanje = trojka.partition(smjer)
if smjer: break
assert smjer
if not nznak: nznak = znak
if not nstanje: nstanje = stanje
stanja.add(nstanje)
prijelaz[stanje, znak] = (nstanje, nznak, int(smjer + '1'))
stanja.add(stanje)
if '!' in stanja:
for znak in znakovi: prijelaz['!', znak] = ('!', znak, 1)
return stanja, abeceda, radna_abeceda, '_', prijelaz, početno, '.'
def parsiraj_strelice_BKG(strelice):
"""Čitanje gramatike zapisane u standardnom obliku pomoću strelica.
Svaki red je oblika varijabla -> ds1 | ds2 | ... (moguće desne strane).
ε se može i ne mora pisati. Prvi red s lijeve strane ima početnu varijablu.
Znakovi u svakoj desnoj strani moraju biti razdvojeni razmacima."""
varijable, simboli, pravila, početna = set(), set(), set(), None
for linija in strelice.strip().splitlines():
linija = linija.replace('->', ' -> ', 1)
varijabla, strelica, ostalo = linija.split(None, 2)
varijable.add(varijabla)
if početna is None:
početna = varijabla
for zamjena in ostalo.split('|'):
zamjene = tuple(zamjena.split())
if zamjene == ('ε',): zamjene = ()
pravila.add((varijabla,) + zamjene)
simboli.update(zamjene)
return varijable, simboli - varijable, pravila, početna
def strelice(gramatika):
"""Ispis gramatike u standardnom obliku pomoću strelicâ.
Pogledati funkciju util.parsiraj_strelice_BKG za detalje."""
grupe = {V: set() for V in gramatika.varijable}
for varijabla, *zamjene in gramatika.pravila:
grupe[varijabla].add(' '.join(map(str, zamjene)) or 'ε')
print(gramatika.početna, '->',
' | '.join(grupe.pop(gramatika.početna)) or '∅')
for varijabla, grupa in sorted(grupe.items()):
print(varijabla, '->', ' | '.join(grupa) or '∅')
print()
def slučajni_testovi(abeceda, koliko, maxduljina):
"""Generator slučajno odabranih riječi nad abecedom."""
znakovi = list(abeceda)
yield ε
for znak in znakovi:
yield znak,
for _ in range(koliko):
duljina = random.randint(2, maxduljina)
yield tuple(random.choice(znakovi) for _ in range(duljina))
def provjeri(objekt, specifikacija, koliko=999, maxduljina=9):
"""Osigurava da se objekt drži specifikacije, slučajnim testiranjem."""
import RI, BKG
if isinstance(objekt, RI.RegularniIzraz):
jezik = objekt.KA().prihvaća
elif isinstance(objekt, BKG.BeskontekstnaGramatika):
jezik = objekt.CYK
else:
jezik = objekt.prihvaća
for test in slučajni_testovi(objekt.abeceda, koliko, maxduljina):
lijevo = jezik(test)
if isinstance(test, tuple) and all(
isinstance(znak, str) and len(znak) == 1 for znak in test):
test = ''.join(test)
desno = specifikacija(test)
if lijevo != bool(desno):
raise Kontraprimjer(test, desno)
def označi1(stanje, *ls):
"""Dodaje oznaci stanja dodatne oznake u ls."""
return naniži(stanje) + ls
def novo(prefiks, iskorišteni):
"""Novi element koji počinje prefiksom i ne pripada skupu iskorišteni."""
if prefiks in iskorišteni:
for broj in itertools.count():
kandidat = prefiks + str(broj)
if kandidat not in iskorišteni:
return kandidat
return prefiks
def DOT_PA(automat):
"""Dijagram danog PA u formatu DOT. ε se piše kao e."""
Q, Σ, Γ, Δ, q0, F = automat.komponente
r = {q: i for i, q in enumerate(Q, 1)}
obrazac = [
'digraph {',
'rankdir = LR',
'node [ style = invis ] 0',
'node [ style = solid ]',
]
for oznaka, broj in r.items():
obrazac.append('node [ peripheries={}, label="{}" ] {}'
.format(2 if oznaka in F else 1, oznaka, broj))
obrazac.append('0 -> ' + str(r[q0]))
brid = collections.defaultdict(set)
for p, α, t, q, s in Δ:
brid[p, q, t, s].add(α)
for (p, q, t, s), znakovi in brid.items():
linija = ','.join(map(str, znakovi))
if not s == t == ε:
if linija == 'ε': linija = ''
else: linija += ','
linija += '{}:{}'.format(t if t != ε else '', s if s != ε else '')
obrazac.append('{} -> {} [ label="{}" ]'.format(r[p], r[q], linija))
obrazac.append('}')
return '\n'.join(obrazac).replace('ε', 'e')
def prikaz(stanje, pozicija, traka):
print(*traka[:pozicija], '[{}>'.format(stanje),
*traka[pozicija:], '_', sep='')
| vedgar/ip | Chomsky/util.py | Python | unlicense | 14,523 |
def zip(*arg):
Result = []
Check = 1
#check if every item in arg has the same length
for i in arg:
if len(i) != len(arg[0]):
            print 'please make sure all items have the same length'
Check = 0
break
    if Check:
        for j in range(0, len(arg[0])):
            result = ()
            for item in arg:
                result = result + (item[j],)
            Result.append(result)
return Result
def unzip(x):
Length = len(x[0])
result = ()
LIST = []
for i in range(0,len(x[0])):
LIST.append([],)
for item in x:
for j in range(0,len(LIST)):
LIST[j].append(item[j])
for k in LIST:
result = result + (k,)
return result
def Test():
print '#1 test: '
print ' zip([1,1,1],[2,2,2],[3,3,3],[4,4,4]) -->', zip([1,1,1],[2,2,2],[3,3,3],[4,4,4])
print '\n'
print ' unzip([(1,2,3,4,5),(2,3,4,5,6),(3,4,5,6,7)]) -->', unzip([(1,2,3,4,5),(2,3,4,5,6),(3,4,5,6,7)])
print '\n'
print '#2 test: unzip(zip([100,200,300],[200,300,400],[0,0,0]))'
print unzip(zip([100,200,300],[200,300,400], [0,0,0]))
print '\n'
if __name__ == '__main__':
Test()
| DataMonster/Python | exer/zipunzip/zip.py | Python | unlicense | 1,230 |
"""
WSGI config for board project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "board.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| Atom1c/home | board/board/wsgi.py | Python | unlicense | 385 |
from django.test import TestCase
from finance.models import Banking_Account
class IBANTestCase(TestCase):
def setUp(self):
pass
def test_iban_converter(self):
"""BBAN to IBAN conversion"""
self.assertEqual(Banking_Account.convertBBANToIBAN("091-0002777-90"), 'BE34091000277790')
self.assertEqual(Banking_Account.convertBBANToIBAN("679-2005502-27"), 'BE48679200550227')
self.assertEqual(Banking_Account.isBBAN("679-2005502-27"), True)
self.assertEqual(Banking_Account.isBBAN('BE48679200550227'), False)
self.assertEqual(Banking_Account.isIBAN("679-2005502-27"), False)
self.assertEqual(Banking_Account.isIBAN('BE48679200550227'), True)
| 317070/ppbe-finance | finance/tests.py | Python | unlicense | 725 |
"""Server API tests."""
import unittest
# Disable W0611: Unused import
import tests.treadmill_test_deps # pylint: disable=W0611
import mock
from treadmill import admin
from treadmill.api import server
class ApiServerTest(unittest.TestCase):
"""treadmill.api.server tests."""
def setUp(self):
self.svr = server.API()
def tearDown(self):
pass
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Server.list', mock.Mock(return_value=[]))
def test_list(self):
"""Dummy test for treadmill.api.server._list()"""
self.svr.list(None, None)
svr_admin = admin.Server(None)
self.assertTrue(svr_admin.list.called)
self.svr.list('some-cell', None)
svr_admin.list.assert_called_with({'cell': 'some-cell'})
self.svr.list(label='xxx')
svr_admin.list.assert_called_with({'label': 'xxx'})
self.svr.list('some-cell', 'xxx')
svr_admin.list.assert_called_with({'cell': 'some-cell',
'label': 'xxx'})
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Server.get',
mock.Mock(return_value={'_id': 'foo.somewhere.in.xx.com'}))
def test_get(self):
"""Dummy test for treadmill.api.server.get()"""
svr_admin = admin.Server(None)
self.svr.get('foo.somewhere.in.xx.com')
svr_admin.get.assert_called_with('foo.somewhere.in.xx.com')
@mock.patch('treadmill.context.AdminContext.conn',
mock.Mock(return_value=admin.Admin(None, None)))
@mock.patch('treadmill.admin.Server.get',
mock.Mock(return_value={'_id': 'foo.somewhere.in.xx.com'}))
@mock.patch('treadmill.admin.Server.create', mock.Mock())
def test_create(self):
"""Dummy test for treadmill.api.server.create()"""
svr_admin = admin.Server(None)
self.svr.create('foo.somewhere.in.xx.com', {'cell': 'ny-999-cell',
'label': 'xxx'})
svr_admin.get.assert_called_with('foo.somewhere.in.xx.com')
if __name__ == '__main__':
unittest.main()
| toenuff/treadmill | tests/api/server_test.py | Python | apache-2.0 | 2,316 |
# (C) Copyright 2014 Voyager Search
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import glob
def make_mo_files():
"""Utility function to generate MO files."""
po_files = glob.glob(os.path.join(os.path.dirname(__file__), 'LC_MESSAGES', '*.po'))
try:
sys.path.append(os.path.join(os.path.dirname(sys.executable), "tools", "i18n"))
import msgfmt
for po_file in po_files:
msgfmt.make(po_file, po_file.replace('.po', '.mo'))
except (IOError, ImportError):
pass
if __name__ == '__main__':
make_mo_files()
| voyagersearch/voyager-py | processing/locale/make_mo_files.py | Python | apache-2.0 | 1,085 |
from django.utils.translation import ugettext as _
from django.utils.timezone import now as timezone_now
from django.conf import settings
from django.core import validators
from django.core.exceptions import ValidationError
from django.db import connection, IntegrityError
from django.http import HttpRequest, HttpResponse
from typing import Dict, List, Set, Any, Iterable, \
Optional, Tuple, Union, Sequence, cast
from zerver.lib.exceptions import JsonableError, ErrorCode
from zerver.lib.html_diff import highlight_html_differences
from zerver.decorator import has_request_variables, \
REQ, to_non_negative_int
from django.utils.html import escape as escape_html
from zerver.lib import bugdown
from zerver.lib.zcommand import process_zcommands
from zerver.lib.actions import recipient_for_user_profiles, do_update_message_flags, \
compute_irc_user_fullname, compute_jabber_user_fullname, \
create_mirror_user_if_needed, check_send_message, do_update_message, \
extract_recipients, truncate_body, render_incoming_message, do_delete_messages, \
do_mark_all_as_read, do_mark_stream_messages_as_read, \
get_user_info_for_message_updates, check_schedule_message
from zerver.lib.addressee import get_user_profiles, get_user_profiles_by_ids
from zerver.lib.queue import queue_json_publish
from zerver.lib.message import (
access_message,
messages_for_ids,
render_markdown,
get_first_visible_message_id,
)
from zerver.lib.response import json_success, json_error
from zerver.lib.sqlalchemy_utils import get_sqlalchemy_connection
from zerver.lib.streams import access_stream_by_id, can_access_stream_history_by_name
from zerver.lib.timestamp import datetime_to_timestamp, convert_to_UTC
from zerver.lib.timezone import get_timezone
from zerver.lib.topic import (
topic_column_sa,
topic_match_sa,
user_message_exists_for_topic,
DB_TOPIC_NAME,
LEGACY_PREV_TOPIC,
MATCH_TOPIC,
REQ_topic,
)
from zerver.lib.topic_mutes import exclude_topic_mutes
from zerver.lib.utils import statsd
from zerver.lib.validator import \
check_list, check_int, check_dict, check_string, check_bool, check_string_or_int_list
from zerver.lib.zephyr import compute_mit_user_fullname
from zerver.models import Message, UserProfile, Stream, Subscription, Client,\
Realm, RealmDomain, Recipient, UserMessage, bulk_get_recipients, get_personal_recipient, \
get_stream, email_to_domain, get_realm, get_active_streams, \
get_user_including_cross_realm, get_stream_recipient
from sqlalchemy import func
from sqlalchemy.sql import select, join, column, literal_column, literal, and_, \
or_, not_, union_all, alias, Selectable, ColumnElement, table
from dateutil.parser import parse as dateparser
import re
import ujson
import datetime
LARGER_THAN_MAX_MESSAGE_ID = 10000000000000000
MAX_MESSAGES_PER_FETCH = 5000
class BadNarrowOperator(JsonableError):
code = ErrorCode.BAD_NARROW
data_fields = ['desc']
def __init__(self, desc: str) -> None:
self.desc = desc # type: str
@staticmethod
def msg_format() -> str:
return _('Invalid narrow operator: {desc}')
# TODO: Should be Select, but sqlalchemy stubs are busted
Query = Any
# TODO: should be Callable[[ColumnElement], ColumnElement], but sqlalchemy stubs are busted
ConditionTransform = Any
# When you add a new operator to this, also update zerver/lib/narrow.py
class NarrowBuilder:
'''
Build up a SQLAlchemy query to find messages matching a narrow.
'''
# This class has an important security invariant:
#
# None of these methods ever *add* messages to a query's result.
#
# That is, the `add_term` method, and its helpers the `by_*` methods,
# are passed a Query object representing a query for messages; they may
# call some methods on it, and then they return a resulting Query
# object. Things these methods may do to the queries they handle
# include
# * add conditions to filter out rows (i.e., messages), with `query.where`
# * add columns for more information on the same message, with `query.column`
# * add a join for more information on the same message
#
# Things they may not do include
# * anything that would pull in additional rows, or information on
# other messages.
def __init__(self, user_profile: UserProfile, msg_id_column: str) -> None:
self.user_profile = user_profile
self.msg_id_column = msg_id_column
self.user_realm = user_profile.realm
def add_term(self, query: Query, term: Dict[str, Any]) -> Query:
"""
Extend the given query to one narrowed by the given term, and return the result.
This method satisfies an important security property: the returned
query never includes a message that the given query didn't. In
particular, if the given query will only find messages that a given
user can legitimately see, then so will the returned query.
"""
# To maintain the security property, we hold all the `by_*`
# methods to the same criterion. See the class's block comment
# for details.
# We have to be careful here because we're letting users call a method
# by name! The prefix 'by_' prevents it from colliding with builtin
# Python __magic__ stuff.
operator = term['operator']
operand = term['operand']
negated = term.get('negated', False)
method_name = 'by_' + operator.replace('-', '_')
method = getattr(self, method_name, None)
if method is None:
raise BadNarrowOperator('unknown operator ' + operator)
if negated:
maybe_negate = not_
else:
maybe_negate = lambda cond: cond
return method(query, operand, maybe_negate)
def by_has(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if operand not in ['attachment', 'image', 'link']:
raise BadNarrowOperator("unknown 'has' operand " + operand)
col_name = 'has_' + operand
cond = column(col_name)
return query.where(maybe_negate(cond))
def by_in(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if operand == 'home':
conditions = exclude_muting_conditions(self.user_profile, [])
return query.where(and_(*conditions))
elif operand == 'all':
return query
raise BadNarrowOperator("unknown 'in' operand " + operand)
def by_is(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if operand == 'private':
cond = column("flags").op("&")(UserMessage.flags.is_private.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'starred':
cond = column("flags").op("&")(UserMessage.flags.starred.mask) != 0
return query.where(maybe_negate(cond))
elif operand == 'unread':
cond = column("flags").op("&")(UserMessage.flags.read.mask) == 0
return query.where(maybe_negate(cond))
elif operand == 'mentioned':
cond1 = column("flags").op("&")(UserMessage.flags.mentioned.mask) != 0
cond2 = column("flags").op("&")(UserMessage.flags.wildcard_mentioned.mask) != 0
cond = or_(cond1, cond2)
return query.where(maybe_negate(cond))
elif operand == 'alerted':
cond = column("flags").op("&")(UserMessage.flags.has_alert_word.mask) != 0
return query.where(maybe_negate(cond))
raise BadNarrowOperator("unknown 'is' operand " + operand)
_alphanum = frozenset(
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
def _pg_re_escape(self, pattern: str) -> str:
"""
Escape user input to place in a regex
Python's re.escape escapes unicode characters in a way which postgres
fails on, '\u03bb' to '\\\u03bb'. This function will correctly escape
them for postgres, '\u03bb' to '\\u03bb'.
"""
s = list(pattern)
for i, c in enumerate(s):
if c not in self._alphanum:
if ord(c) >= 128:
# convert the character to hex postgres regex will take
# \uXXXX
s[i] = '\\u{:0>4x}'.format(ord(c))
else:
s[i] = '\\' + c
return ''.join(s)
def by_stream(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
try:
# Because you can see your own message history for
# private streams you are no longer subscribed to, we
# need get_stream, not access_stream, here.
stream = get_stream(operand, self.user_profile.realm)
except Stream.DoesNotExist:
raise BadNarrowOperator('unknown stream ' + operand)
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to "social" to also show messages to
# /^(un)*social(.d)*$/ (unsocial, ununsocial, social.d, ...).
# In `ok_to_include_history`, we assume that a non-negated
# `stream` term for a public stream will limit the query to
# that specific stream. So it would be a bug to hit this
# codepath after relying on this term there. But all streams in
# a Zephyr realm are private, so that doesn't happen.
assert(not stream.is_public())
m = re.search(r'^(?:un)*(.+?)(?:\.d)*$', stream.name, re.IGNORECASE)
# Since the regex has a `.+` in it and "" is invalid as a
# stream name, this will always match
assert(m is not None)
base_stream_name = m.group(1)
matching_streams = get_active_streams(self.user_profile.realm).filter(
name__iregex=r'^(un)*%s(\.d)*$' % (self._pg_re_escape(base_stream_name),))
matching_stream_ids = [matching_stream.id for matching_stream in matching_streams]
recipients_map = bulk_get_recipients(Recipient.STREAM, matching_stream_ids)
cond = column("recipient_id").in_([recipient.id for recipient in recipients_map.values()])
return query.where(maybe_negate(cond))
recipient = get_stream_recipient(stream.id)
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
def by_topic(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if self.user_profile.realm.is_zephyr_mirror_realm:
# MIT users expect narrowing to topic "foo" to also show messages to /^foo(.d)*$/
# (foo, foo.d, foo.d.d, etc)
m = re.search(r'^(.*?)(?:\.d)*$', operand, re.IGNORECASE)
# Since the regex has a `.*` in it, this will always match
assert(m is not None)
base_topic = m.group(1)
# Additionally, MIT users expect the empty instance and
# instance "personal" to be the same.
if base_topic in ('', 'personal', '(instance "")'):
cond = or_(
topic_match_sa(""),
topic_match_sa(".d"),
topic_match_sa(".d.d"),
topic_match_sa(".d.d.d"),
topic_match_sa(".d.d.d.d"),
topic_match_sa("personal"),
topic_match_sa("personal.d"),
topic_match_sa("personal.d.d"),
topic_match_sa("personal.d.d.d"),
topic_match_sa("personal.d.d.d.d"),
topic_match_sa('(instance "")'),
topic_match_sa('(instance "").d'),
topic_match_sa('(instance "").d.d'),
topic_match_sa('(instance "").d.d.d'),
topic_match_sa('(instance "").d.d.d.d'),
)
else:
# We limit `.d` counts, since postgres has much better
# query planning for this than they do for a regular
# expression (which would sometimes table scan).
cond = or_(
topic_match_sa(base_topic),
topic_match_sa(base_topic + ".d"),
topic_match_sa(base_topic + ".d.d"),
topic_match_sa(base_topic + ".d.d.d"),
topic_match_sa(base_topic + ".d.d.d.d"),
)
return query.where(maybe_negate(cond))
cond = topic_match_sa(operand)
return query.where(maybe_negate(cond))
def by_sender(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
try:
sender = get_user_including_cross_realm(operand, self.user_realm)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + operand)
cond = column("sender_id") == literal(sender.id)
return query.where(maybe_negate(cond))
def by_near(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
return query
def by_id(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if not str(operand).isdigit():
raise BadNarrowOperator("Invalid message ID")
cond = self.msg_id_column == literal(operand)
return query.where(maybe_negate(cond))
def by_pm_with(self, query: Query, operand: Union[str, Iterable[int]],
maybe_negate: ConditionTransform) -> Query:
try:
if isinstance(operand, str):
email_list = operand.split(",")
user_profiles = get_user_profiles(
emails=email_list,
realm=self.user_realm
)
else:
"""
This is where we handle passing a list of user IDs for the narrow, which is the
preferred/cleaner API.
"""
user_profiles = get_user_profiles_by_ids(
user_ids=operand,
realm=self.user_realm
)
recipient = recipient_for_user_profiles(user_profiles=user_profiles,
forwarded_mirror_message=False,
forwarder_user_profile=None,
sender=self.user_profile,
allow_deactivated=True)
except (JsonableError, ValidationError):
raise BadNarrowOperator('unknown user in ' + str(operand))
# Group DM
if recipient.type == Recipient.HUDDLE:
cond = column("recipient_id") == recipient.id
return query.where(maybe_negate(cond))
# 1:1 PM
other_participant = None
# Find if another person is in PM
for user in user_profiles:
if user.id != self.user_profile.id:
other_participant = user
# PM with another person
if other_participant:
# We need bidirectional messages PM with another person.
# But Recipient.PERSONAL objects only encode the person who
# received the message, and not the other participant in
# the thread (the sender), we need to do a somewhat
# complex query to get messages between these two users
# with either of them as the sender.
self_recipient = get_personal_recipient(self.user_profile.id)
cond = or_(and_(column("sender_id") == other_participant.id,
column("recipient_id") == self_recipient.id),
and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == recipient.id))
return query.where(maybe_negate(cond))
# PM with self
cond = and_(column("sender_id") == self.user_profile.id,
column("recipient_id") == recipient.id)
return query.where(maybe_negate(cond))
def by_group_pm_with(self, query: Query, operand: str,
maybe_negate: ConditionTransform) -> Query:
try:
narrow_profile = get_user_including_cross_realm(operand, self.user_realm)
except UserProfile.DoesNotExist:
raise BadNarrowOperator('unknown user ' + str(operand))
self_recipient_ids = [
recipient_tuple['recipient_id'] for recipient_tuple
in Subscription.objects.filter(
user_profile=self.user_profile,
recipient__type=Recipient.HUDDLE
).values("recipient_id")]
narrow_recipient_ids = [
recipient_tuple['recipient_id'] for recipient_tuple
in Subscription.objects.filter(
user_profile=narrow_profile,
recipient__type=Recipient.HUDDLE
).values("recipient_id")]
recipient_ids = set(self_recipient_ids) & set(narrow_recipient_ids)
cond = column("recipient_id").in_(recipient_ids)
return query.where(maybe_negate(cond))
def by_search(self, query: Query, operand: str, maybe_negate: ConditionTransform) -> Query:
if settings.USING_PGROONGA:
return self._by_search_pgroonga(query, operand, maybe_negate)
else:
return self._by_search_tsearch(query, operand, maybe_negate)
def _by_search_pgroonga(self, query: Query, operand: str,
maybe_negate: ConditionTransform) -> Query:
match_positions_character = func.pgroonga_match_positions_character
query_extract_keywords = func.pgroonga_query_extract_keywords
operand_escaped = func.escape_html(operand)
keywords = query_extract_keywords(operand_escaped)
query = query.column(match_positions_character(column("rendered_content"),
keywords).label("content_matches"))
query = query.column(match_positions_character(func.escape_html(topic_column_sa()),
keywords).label("topic_matches"))
condition = column("search_pgroonga").op("&@~")(operand_escaped)
return query.where(maybe_negate(condition))
def _by_search_tsearch(self, query: Query, operand: str,
maybe_negate: ConditionTransform) -> Query:
tsquery = func.plainto_tsquery(literal("zulip.english_us_search"), literal(operand))
ts_locs_array = func.ts_match_locs_array
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
column("rendered_content"),
tsquery).label("content_matches"))
# We HTML-escape the topic in Postgres to avoid doing a server round-trip
query = query.column(ts_locs_array(literal("zulip.english_us_search"),
func.escape_html(topic_column_sa()),
tsquery).label("topic_matches"))
# Do quoted string matching. We really want phrase
# search here so we can ignore punctuation and do
# stemming, but there isn't a standard phrase search
# mechanism in Postgres
for term in re.findall(r'"[^"]+"|\S+', operand):
if term[0] == '"' and term[-1] == '"':
term = term[1:-1]
term = '%' + connection.ops.prep_for_like_query(term) + '%'
cond = or_(column("content").ilike(term),
topic_column_sa().ilike(term))
query = query.where(maybe_negate(cond))
cond = column("search_tsvector").op("@@")(tsquery)
return query.where(maybe_negate(cond))
# The offsets we get from PGroonga are counted in characters
# whereas the offsets from tsearch_extras are in bytes, so we
# have to account for both cases in the logic below.
def highlight_string(text: str, locs: Iterable[Tuple[int, int]]) -> str:
highlight_start = '<span class="highlight">'
highlight_stop = '</span>'
pos = 0
result = ''
in_tag = False
text_utf8 = text.encode('utf8')
for loc in locs:
(offset, length) = loc
# These indexes are in byte space for tsearch,
# and they are in string space for pgroonga.
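        # Illustrative example (added): in the text 'ααβ', a match on 'β' is
        # at character offset 2 (pgroonga) but byte offset 4 (tsearch), since
        # each 'α' occupies two bytes in UTF-8.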
prefix_start = pos
prefix_end = offset
match_start = offset
match_end = offset + length
if settings.USING_PGROONGA:
prefix = text[prefix_start:prefix_end]
match = text[match_start:match_end]
else:
prefix = text_utf8[prefix_start:prefix_end].decode()
match = text_utf8[match_start:match_end].decode()
for character in (prefix + match):
if character == '<':
in_tag = True
elif character == '>':
in_tag = False
if in_tag:
result += prefix
result += match
else:
result += prefix
result += highlight_start
result += match
result += highlight_stop
pos = match_end
if settings.USING_PGROONGA:
final_frag = text[pos:]
else:
final_frag = text_utf8[pos:].decode()
result += final_frag
return result
def get_search_fields(rendered_content: str, topic_name: str, content_matches: Iterable[Tuple[int, int]],
topic_matches: Iterable[Tuple[int, int]]) -> Dict[str, str]:
return {
'match_content': highlight_string(rendered_content, content_matches),
MATCH_TOPIC: highlight_string(escape_html(topic_name), topic_matches),
}
def narrow_parameter(json: str) -> Optional[List[Dict[str, Any]]]:
data = ujson.loads(json)
if not isinstance(data, list):
raise ValueError("argument is not a list")
if len(data) == 0:
# The "empty narrow" should be None, and not []
return None
def convert_term(elem: Union[Dict[str, Any], List[str]]) -> Dict[str, Any]:
# We have to support a legacy tuple format.
if isinstance(elem, list):
if (len(elem) != 2 or
                any(not isinstance(x, str)
for x in elem)):
raise ValueError("element is not a string pair")
return dict(operator=elem[0], operand=elem[1])
if isinstance(elem, dict):
user_ids_supported_operators = ['pm-with']
if elem.get('operator', '') in user_ids_supported_operators:
operand_validator = check_string_or_int_list
else:
operand_validator = check_string
validator = check_dict([
('operator', check_string),
('operand', operand_validator),
])
error = validator('elem', elem)
if error:
raise JsonableError(error)
# whitelist the fields we care about for now
return dict(
operator=elem['operator'],
operand=elem['operand'],
negated=elem.get('negated', False),
)
raise ValueError("element is not a dictionary")
return list(map(convert_term, data))
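# Illustrative example (added): both accepted narrow formats,
#   narrow_parameter('[["stream", "Denmark"],'
#                    ' {"operator": "topic", "operand": "weather"}]')
# normalizes to
#   [{'operator': 'stream', 'operand': 'Denmark'},
#    {'operator': 'topic', 'operand': 'weather', 'negated': False}]
# (legacy pairs keep the minimal two-key form, with no 'negated' key).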
def ok_to_include_history(narrow: Optional[Iterable[Dict[str, Any]]], user_profile: UserProfile) -> bool:
# There are occasions where we need to find Message rows that
# have no corresponding UserMessage row, because the user is
# reading a public stream that might include messages that
# were sent while the user was not subscribed, but which they are
# allowed to see. We have to be very careful about constructing
# queries in those situations, so this function should return True
# only if we are 100% sure that we're gonna add a clause to the
# query that narrows to a particular public stream on the user's realm.
# If we screw this up, then we can get into a nasty situation of
# polluting our narrow results with messages from other realms.
include_history = False
if narrow is not None:
for term in narrow:
if term['operator'] == "stream" and not term.get('negated', False):
if can_access_stream_history_by_name(user_profile, term['operand']):
include_history = True
# Disable historical messages if the user is narrowing on anything
# that's a property on the UserMessage table. There cannot be
# historical messages in these cases anyway.
for term in narrow:
if term['operator'] == "is":
include_history = False
return include_history
def get_stream_name_from_narrow(narrow: Optional[Iterable[Dict[str, Any]]]) -> Optional[str]:
if narrow is not None:
for term in narrow:
if term['operator'] == 'stream':
return term['operand'].lower()
return None
def exclude_muting_conditions(user_profile: UserProfile,
narrow: Optional[Iterable[Dict[str, Any]]]) -> List[Selectable]:
conditions = []
stream_name = get_stream_name_from_narrow(narrow)
stream_id = None
if stream_name is not None:
try:
# Note that this code works around a lint rule that
# says we should use access_stream_by_name to get the
# stream. It is okay here, because we are only using
# the stream id to exclude data, not to include results.
stream_id = get_stream(stream_name, user_profile.realm).id
except Stream.DoesNotExist:
pass
if stream_id is None:
rows = Subscription.objects.filter(
user_profile=user_profile,
active=True,
is_muted=True,
recipient__type=Recipient.STREAM
).values('recipient_id')
muted_recipient_ids = [row['recipient_id'] for row in rows]
if len(muted_recipient_ids) > 0:
# Only add the condition if we have muted streams to simplify/avoid warnings.
condition = not_(column("recipient_id").in_(muted_recipient_ids))
conditions.append(condition)
conditions = exclude_topic_mutes(conditions, user_profile, stream_id)
return conditions
def get_base_query_for_search(user_profile: UserProfile,
need_message: bool,
need_user_message: bool) -> Tuple[Query, ColumnElement]:
if need_message and need_user_message:
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
join(table("zerver_usermessage"), table("zerver_message"),
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
inner_msg_id_col = column("message_id")
return (query, inner_msg_id_col)
if need_user_message:
query = select([column("message_id"), column("flags")],
column("user_profile_id") == literal(user_profile.id),
table("zerver_usermessage"))
inner_msg_id_col = column("message_id")
return (query, inner_msg_id_col)
else:
assert(need_message)
query = select([column("id").label("message_id")],
None,
table("zerver_message"))
inner_msg_id_col = literal_column("zerver_message.id")
return (query, inner_msg_id_col)
def add_narrow_conditions(user_profile: UserProfile,
inner_msg_id_col: ColumnElement,
query: Query,
narrow: Optional[List[Dict[str, Any]]]) -> Tuple[Query, bool]:
is_search = False # for now
if narrow is None:
return (query, is_search)
# Build the query for the narrow
builder = NarrowBuilder(user_profile, inner_msg_id_col)
search_operands = []
# As we loop through terms, builder does most of the work to extend
# our query, but we need to collect the search operands and handle
# them after the loop.
for term in narrow:
if term['operator'] == 'search':
search_operands.append(term['operand'])
else:
query = builder.add_term(query, term)
if search_operands:
is_search = True
query = query.column(topic_column_sa()).column(column("rendered_content"))
search_term = dict(
operator='search',
operand=' '.join(search_operands)
)
query = builder.add_term(query, search_term)
return (query, is_search)
def find_first_unread_anchor(sa_conn: Any,
user_profile: UserProfile,
narrow: Optional[List[Dict[str, Any]]]) -> int:
# We always need UserMessage in our query, because it has the unread
# flag for the user.
need_user_message = True
# Because we will need to call exclude_muting_conditions, unless
# the user hasn't muted anything, we will need to include Message
# in our query. It may be worth eventually adding an optimization
# for the case of a user who hasn't muted anything to avoid the
# join in that case, but it's low priority.
need_message = True
query, inner_msg_id_col = get_base_query_for_search(
user_profile=user_profile,
need_message=need_message,
need_user_message=need_user_message,
)
query, is_search = add_narrow_conditions(
user_profile=user_profile,
inner_msg_id_col=inner_msg_id_col,
query=query,
narrow=narrow,
)
condition = column("flags").op("&")(UserMessage.flags.read.mask) == 0
# We exclude messages on muted topics when finding the first unread
# message in this narrow
muting_conditions = exclude_muting_conditions(user_profile, narrow)
if muting_conditions:
condition = and_(condition, *muting_conditions)
# The mobile app uses narrow=[] and use_first_unread_anchor=True to
# determine what messages to show when you first load the app.
# Unfortunately, this means that if you have a years-old unread
# message, the mobile app could get stuck in the past.
#
# To fix this, we enforce that the "first unread anchor" must be on or
# after the user's current pointer location. Since the pointer
# location refers to the latest the user has read in the home view,
# we'll only apply this logic in the home view (ie, when narrow is
# empty).
if not narrow:
pointer_condition = inner_msg_id_col >= user_profile.pointer
condition = and_(condition, pointer_condition)
first_unread_query = query.where(condition)
first_unread_query = first_unread_query.order_by(inner_msg_id_col.asc()).limit(1)
first_unread_result = list(sa_conn.execute(first_unread_query).fetchall())
if len(first_unread_result) > 0:
anchor = first_unread_result[0][0]
else:
anchor = LARGER_THAN_MAX_MESSAGE_ID
return anchor
@has_request_variables
def zcommand_backend(request: HttpRequest, user_profile: UserProfile,
command: str=REQ('command')) -> HttpResponse:
return json_success(process_zcommands(command, user_profile))
@has_request_variables
def get_messages_backend(request: HttpRequest, user_profile: UserProfile,
anchor: int=REQ(converter=int, default=None),
num_before: int=REQ(converter=to_non_negative_int),
num_after: int=REQ(converter=to_non_negative_int),
narrow: Optional[List[Dict[str, Any]]]=REQ('narrow', converter=narrow_parameter,
default=None),
use_first_unread_anchor: bool=REQ(validator=check_bool, default=False),
client_gravatar: bool=REQ(validator=check_bool, default=False),
apply_markdown: bool=REQ(validator=check_bool, default=True)) -> HttpResponse:
if anchor is None and not use_first_unread_anchor:
return json_error(_("Missing 'anchor' argument (or set 'use_first_unread_anchor'=True)."))
if num_before + num_after > MAX_MESSAGES_PER_FETCH:
return json_error(_("Too many messages requested (maximum %s).")
% (MAX_MESSAGES_PER_FETCH,))
if user_profile.realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
# If email addresses are only available to administrators,
# clients cannot compute gravatars, so we force-set it to false.
client_gravatar = False
include_history = ok_to_include_history(narrow, user_profile)
if include_history:
# The initial query in this case doesn't use `zerver_usermessage`,
# and isn't yet limited to messages the user is entitled to see!
#
# This is OK only because we've made sure this is a narrow that
# will cause us to limit the query appropriately later.
# See `ok_to_include_history` for details.
need_message = True
need_user_message = False
elif narrow is None:
# We need to limit to messages the user has received, but we don't actually
# need any fields from Message
need_message = False
need_user_message = True
else:
need_message = True
need_user_message = True
query, inner_msg_id_col = get_base_query_for_search(
user_profile=user_profile,
need_message=need_message,
need_user_message=need_user_message,
)
query, is_search = add_narrow_conditions(
user_profile=user_profile,
inner_msg_id_col=inner_msg_id_col,
query=query,
narrow=narrow,
)
if narrow is not None:
# Add some metadata to our logging data for narrows
verbose_operators = []
for term in narrow:
if term['operator'] == "is":
verbose_operators.append("is:" + term['operand'])
else:
verbose_operators.append(term['operator'])
request._log_data['extra'] = "[%s]" % (",".join(verbose_operators),)
sa_conn = get_sqlalchemy_connection()
if use_first_unread_anchor:
anchor = find_first_unread_anchor(
sa_conn,
user_profile,
narrow,
)
anchored_to_left = (anchor == 0)
# Set value that will be used to short circuit the after_query
# altogether and avoid needless conditions in the before_query.
anchored_to_right = (anchor == LARGER_THAN_MAX_MESSAGE_ID)
if anchored_to_right:
num_after = 0
first_visible_message_id = get_first_visible_message_id(user_profile.realm)
query = limit_query_to_range(
query=query,
num_before=num_before,
num_after=num_after,
anchor=anchor,
anchored_to_left=anchored_to_left,
anchored_to_right=anchored_to_right,
id_col=inner_msg_id_col,
first_visible_message_id=first_visible_message_id,
)
main_query = alias(query)
query = select(main_query.c, None, main_query).order_by(column("message_id").asc())
# This is a hack to tag the query we use for testing
query = query.prefix_with("/* get_messages */")
rows = list(sa_conn.execute(query).fetchall())
query_info = post_process_limited_query(
rows=rows,
num_before=num_before,
num_after=num_after,
anchor=anchor,
anchored_to_left=anchored_to_left,
anchored_to_right=anchored_to_right,
first_visible_message_id=first_visible_message_id,
)
rows = query_info['rows']
# The following is a little messy, but ensures that the code paths
# are similar regardless of the value of include_history. The
# 'user_messages' dictionary maps each message to the user's
# UserMessage object for that message, which we will attach to the
# rendered message dict before returning it. We attempt to
# bulk-fetch rendered message dicts from remote cache using the
# 'messages' list.
message_ids = [] # type: List[int]
user_message_flags = {} # type: Dict[int, List[str]]
if include_history:
message_ids = [row[0] for row in rows]
# TODO: This could be done with an outer join instead of two queries
um_rows = UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids)
user_message_flags = {um.message_id: um.flags_list() for um in um_rows}
for message_id in message_ids:
if message_id not in user_message_flags:
user_message_flags[message_id] = ["read", "historical"]
else:
for row in rows:
message_id = row[0]
flags = row[1]
user_message_flags[message_id] = UserMessage.flags_list_for_flags(flags)
message_ids.append(message_id)
search_fields = dict() # type: Dict[int, Dict[str, str]]
if is_search:
for row in rows:
message_id = row[0]
(topic_name, rendered_content, content_matches, topic_matches) = row[-4:]
try:
search_fields[message_id] = get_search_fields(rendered_content, topic_name,
content_matches, topic_matches)
except UnicodeDecodeError as err: # nocoverage
# No coverage for this block since it should be
# impossible, and we plan to remove it once we've
# debugged the case that makes it happen.
raise Exception(str(err), message_id, narrow)
message_list = messages_for_ids(
message_ids=message_ids,
user_message_flags=user_message_flags,
search_fields=search_fields,
apply_markdown=apply_markdown,
client_gravatar=client_gravatar,
allow_edit_history=user_profile.realm.allow_edit_history,
)
statsd.incr('loaded_old_messages', len(message_list))
ret = dict(
messages=message_list,
result='success',
msg='',
found_anchor=query_info['found_anchor'],
found_oldest=query_info['found_oldest'],
found_newest=query_info['found_newest'],
history_limited=query_info['history_limited'],
anchor=anchor,
)
return json_success(ret)
def limit_query_to_range(query: Query,
num_before: int,
num_after: int,
anchor: int,
anchored_to_left: bool,
anchored_to_right: bool,
id_col: ColumnElement,
first_visible_message_id: int) -> Query:
'''
This code is actually generic enough that we could move it to a
library, but our only caller for now is message search.
'''
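    # Worked example (added, assuming first_visible_message_id <= anchor):
    # with anchor=100, num_before=2, num_after=2 and no left/right anchoring,
    # the "before" query takes id <= 99 ordered desc with limit 2, the
    # "after" query takes id >= 100 ordered asc with limit 3 (num_after + 1,
    # so the anchor row rides along), and the two are UNION ALLed.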
need_before_query = (not anchored_to_left) and (num_before > 0)
need_after_query = (not anchored_to_right) and (num_after > 0)
need_both_sides = need_before_query and need_after_query
# The semantics of our flags are as follows:
#
    # num_before = number of rows < anchor
# num_after = number of rows > anchor
#
# But we also want the row where id == anchor (if it exists),
# and we don't want to union up to 3 queries. So in some cases
# we do things like `after_limit = num_after + 1` to grab the
# anchor row in the "after" query.
#
# Note that in some cases, if the anchor row isn't found, we
# actually may fetch an extra row at one of the extremes.
if need_both_sides:
before_anchor = anchor - 1
after_anchor = max(anchor, first_visible_message_id)
before_limit = num_before
after_limit = num_after + 1
elif need_before_query:
before_anchor = anchor
before_limit = num_before
if not anchored_to_right:
before_limit += 1
elif need_after_query:
after_anchor = max(anchor, first_visible_message_id)
after_limit = num_after + 1
if need_before_query:
before_query = query
if not anchored_to_right:
before_query = before_query.where(id_col <= before_anchor)
before_query = before_query.order_by(id_col.desc())
before_query = before_query.limit(before_limit)
if need_after_query:
after_query = query
if not anchored_to_left:
after_query = after_query.where(id_col >= after_anchor)
after_query = after_query.order_by(id_col.asc())
after_query = after_query.limit(after_limit)
if need_both_sides:
query = union_all(before_query.self_group(), after_query.self_group())
elif need_before_query:
query = before_query
elif need_after_query:
query = after_query
else:
# If we don't have either a before_query or after_query, it's because
# some combination of num_before/num_after/anchor are zero or
# use_first_unread_anchor logic found no unread messages.
#
# The most likely reason is somebody is doing an id search, so searching
# for something like `message_id = 42` is exactly what we want. In other
# cases, which could possibly be buggy API clients, at least we will
# return at most one row here.
query = query.where(id_col == anchor)
return query
def post_process_limited_query(rows: List[Any],
num_before: int,
num_after: int,
anchor: int,
anchored_to_left: bool,
anchored_to_right: bool,
first_visible_message_id: int) -> Dict[str, Any]:
# Our queries may have fetched extra rows if they added
# "headroom" to the limits, but we want to truncate those
# rows.
#
# Also, in cases where we had non-zero values of num_before or
# num_after, we want to know found_oldest and found_newest, so
# that the clients will know that they got complete results.
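    # Illustrative example (added): rows [90, 99, 100, 101, 102] with
    # anchor=100, num_before=1, num_after=1 trim to before_rows=[99],
    # anchor_rows=[100], after_rows=[101]; found_oldest stays False since
    # len(before_rows) == num_before (older rows may still exist).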
if first_visible_message_id > 0:
visible_rows = [r for r in rows if r[0] >= first_visible_message_id]
else:
visible_rows = rows
rows_limited = len(visible_rows) != len(rows)
if anchored_to_right:
num_after = 0
before_rows = visible_rows[:]
anchor_rows = [] # type: List[Any]
after_rows = [] # type: List[Any]
else:
before_rows = [r for r in visible_rows if r[0] < anchor]
anchor_rows = [r for r in visible_rows if r[0] == anchor]
after_rows = [r for r in visible_rows if r[0] > anchor]
if num_before:
before_rows = before_rows[-1 * num_before:]
if num_after:
after_rows = after_rows[:num_after]
visible_rows = before_rows + anchor_rows + after_rows
found_anchor = len(anchor_rows) == 1
found_oldest = anchored_to_left or (len(before_rows) < num_before)
found_newest = anchored_to_right or (len(after_rows) < num_after)
    # BUG: history_limited is incorrectly False in the event that we had
# to bump `anchor` up due to first_visible_message_id, and there
# were actually older messages. This may be a rare event in the
# context where history_limited is relevant, because it can only
# happen in one-sided queries with no num_before (see tests tagged
# BUG in PostProcessTest for examples), and we don't generally do
# those from the UI, so this might be OK for now.
#
# The correct fix for this probably involves e.g. making a
# `before_query` when we increase `anchor` just to confirm whether
# messages were hidden.
history_limited = rows_limited and found_oldest
return dict(
rows=visible_rows,
found_anchor=found_anchor,
found_newest=found_newest,
found_oldest=found_oldest,
history_limited=history_limited,
)
@has_request_variables
def update_message_flags(request: HttpRequest, user_profile: UserProfile,
messages: List[int]=REQ(validator=check_list(check_int)),
operation: str=REQ('op'), flag: str=REQ()) -> HttpResponse:
count = do_update_message_flags(user_profile, request.client, operation, flag, messages)
target_count_str = str(len(messages))
log_data_str = "[%s %s/%s] actually %s" % (operation, flag, target_count_str, count)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'messages': messages,
'msg': ''})
@has_request_variables
def mark_all_as_read(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
count = do_mark_all_as_read(user_profile, request.client)
log_data_str = "[%s updated]" % (count,)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'msg': ''})
@has_request_variables
def mark_stream_as_read(request: HttpRequest,
user_profile: UserProfile,
stream_id: int=REQ(validator=check_int)) -> HttpResponse:
stream, recipient, sub = access_stream_by_id(user_profile, stream_id)
count = do_mark_stream_messages_as_read(user_profile, request.client, stream)
log_data_str = "[%s updated]" % (count,)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'msg': ''})
@has_request_variables
def mark_topic_as_read(request: HttpRequest,
user_profile: UserProfile,
stream_id: int=REQ(validator=check_int),
topic_name: str=REQ()) -> HttpResponse:
stream, recipient, sub = access_stream_by_id(user_profile, stream_id)
if topic_name:
topic_exists = user_message_exists_for_topic(
user_profile=user_profile,
recipient=recipient,
topic_name=topic_name,
)
if not topic_exists:
raise JsonableError(_('No such topic \'%s\'') % (topic_name,))
count = do_mark_stream_messages_as_read(user_profile, request.client, stream, topic_name)
log_data_str = "[%s updated]" % (count,)
request._log_data["extra"] = log_data_str
return json_success({'result': 'success',
'msg': ''})
class InvalidMirrorInput(Exception):
pass
def create_mirrored_message_users(request: HttpRequest, user_profile: UserProfile,
recipients: Iterable[str]) -> UserProfile:
if "sender" not in request.POST:
raise InvalidMirrorInput("No sender")
sender_email = request.POST["sender"].strip().lower()
referenced_users = set([sender_email])
if request.POST['type'] == 'private':
for email in recipients:
referenced_users.add(email.lower())
if request.client.name == "zephyr_mirror":
user_check = same_realm_zephyr_user
fullname_function = compute_mit_user_fullname
elif request.client.name == "irc_mirror":
user_check = same_realm_irc_user
fullname_function = compute_irc_user_fullname
elif request.client.name in ("jabber_mirror", "JabberMirror"):
user_check = same_realm_jabber_user
fullname_function = compute_jabber_user_fullname
else:
raise InvalidMirrorInput("Unrecognized mirroring client")
for email in referenced_users:
# Check that all referenced users are in our realm:
if not user_check(user_profile, email):
raise InvalidMirrorInput("At least one user cannot be mirrored")
# Create users for the referenced users, if needed.
for email in referenced_users:
create_mirror_user_if_needed(user_profile.realm, email, fullname_function)
sender = get_user_including_cross_realm(sender_email, user_profile.realm)
return sender
def same_realm_zephyr_user(user_profile: UserProfile, email: str) -> bool:
#
# Are the sender and recipient both addresses in the same Zephyr
# mirroring realm? We have to handle this specially, inferring
# the domain from the e-mail address, because the recipient may
    # not exist in Zulip and we may need to make a stub Zephyr
# mirroring user on the fly.
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email)
# Assumes allow_subdomains=False for all RealmDomain's corresponding to
# these realms.
return user_profile.realm.is_zephyr_mirror_realm and \
RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_irc_user(user_profile: UserProfile, email: str) -> bool:
# Check whether the target email address is an IRC user in the
# same realm as user_profile, i.e. if the domain were example.com,
# the IRC user would need to be username@irc.example.com
try:
validators.validate_email(email)
except ValidationError:
return False
domain = email_to_domain(email).replace("irc.", "")
# Assumes allow_subdomains=False for all RealmDomain's corresponding to
# these realms.
return RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
def same_realm_jabber_user(user_profile: UserProfile, email: str) -> bool:
try:
validators.validate_email(email)
except ValidationError:
return False
# If your Jabber users have a different email domain than the
# Zulip users, this is where you would do any translation.
domain = email_to_domain(email)
# Assumes allow_subdomains=False for all RealmDomain's corresponding to
# these realms.
return RealmDomain.objects.filter(realm=user_profile.realm, domain=domain).exists()
def handle_deferred_message(sender: UserProfile, client: Client,
message_type_name: str,
message_to: Union[Sequence[str], Sequence[int]],
topic_name: Optional[str],
message_content: str, delivery_type: str,
defer_until: str, tz_guess: Optional[str],
forwarder_user_profile: UserProfile,
realm: Optional[Realm]) -> HttpResponse:
deliver_at = None
local_tz = 'UTC'
if tz_guess:
local_tz = tz_guess
elif sender.timezone:
local_tz = sender.timezone
try:
deliver_at = dateparser(defer_until)
except ValueError:
return json_error(_("Invalid time format"))
deliver_at_usertz = deliver_at
if deliver_at_usertz.tzinfo is None:
user_tz = get_timezone(local_tz)
# Since mypy is not able to recognize localize and normalize as attributes of tzinfo we use ignore.
deliver_at_usertz = user_tz.normalize(user_tz.localize(deliver_at)) # type: ignore # Reason in comment on previous line.
deliver_at = convert_to_UTC(deliver_at_usertz)
if deliver_at <= timezone_now():
return json_error(_("Time must be in the future."))
check_schedule_message(sender, client, message_type_name, message_to,
topic_name, message_content, delivery_type,
deliver_at, realm=realm,
forwarder_user_profile=forwarder_user_profile)
return json_success({"deliver_at": str(deliver_at_usertz)})
# We do not @require_login for send_message_backend, since it is used
# both from the API and the web service. Code calling
# send_message_backend should either check the API key or check that
# the user is logged in.
@has_request_variables
def send_message_backend(request: HttpRequest, user_profile: UserProfile,
message_type_name: str=REQ('type'),
message_to: Union[Sequence[int], Sequence[str]]=REQ(
'to', converter=extract_recipients, default=[]),
forged: bool=REQ(default=False,
documentation_pending=True),
topic_name: Optional[str]=REQ_topic(),
message_content: str=REQ('content'),
widget_content: Optional[str]=REQ(default=None,
documentation_pending=True),
realm_str: Optional[str]=REQ('realm_str', default=None,
documentation_pending=True),
local_id: Optional[str]=REQ(default=None,
documentation_pending=True),
queue_id: Optional[str]=REQ(default=None,
documentation_pending=True),
delivery_type: Optional[str]=REQ('delivery_type', default='send_now',
documentation_pending=True),
defer_until: Optional[str]=REQ('deliver_at', default=None,
documentation_pending=True),
tz_guess: Optional[str]=REQ('tz_guess', default=None,
documentation_pending=True)
) -> HttpResponse:
client = request.client
is_super_user = request.user.is_api_super_user
if forged and not is_super_user:
return json_error(_("User not authorized for this query"))
realm = None
if realm_str and realm_str != user_profile.realm.string_id:
if not is_super_user:
# The email gateway bot needs to be able to send messages in
# any realm.
return json_error(_("User not authorized for this query"))
try:
realm = get_realm(realm_str)
except Realm.DoesNotExist:
return json_error(_("Unknown organization '%s'") % (realm_str,))
if client.name in ["zephyr_mirror", "irc_mirror", "jabber_mirror", "JabberMirror"]:
# Here's how security works for mirroring:
#
# For private messages, the message must be (1) both sent and
# received exclusively by users in your realm, and (2)
# received by the forwarding user.
#
# For stream messages, the message must be (1) being forwarded
# by an API superuser for your realm and (2) being sent to a
# mirrored stream.
#
# The security checks are split between the below code
# (especially create_mirrored_message_users which checks the
# same-realm constraint) and recipient_for_emails (which
# checks that PMs are received by the forwarding user)
if "sender" not in request.POST:
return json_error(_("Missing sender"))
if message_type_name != "private" and not is_super_user:
return json_error(_("User not authorized for this query"))
# For now, mirroring only works with recipient emails, not for
# recipient user IDs.
if not all(isinstance(to_item, str) for to_item in message_to):
return json_error(_("Mirroring not allowed with recipient user IDs"))
# We need this manual cast so that mypy doesn't complain about
# create_mirrored_message_users not being able to accept a Sequence[int]
# type parameter.
message_to = cast(Sequence[str], message_to)
try:
mirror_sender = create_mirrored_message_users(request, user_profile, message_to)
except InvalidMirrorInput:
return json_error(_("Invalid mirrored message"))
if client.name == "zephyr_mirror" and not user_profile.realm.is_zephyr_mirror_realm:
return json_error(_("Zephyr mirroring is not allowed in this organization"))
sender = mirror_sender
else:
sender = user_profile
if (delivery_type == 'send_later' or delivery_type == 'remind') and defer_until is None:
return json_error(_("Missing deliver_at in a request for delayed message delivery"))
if (delivery_type == 'send_later' or delivery_type == 'remind') and defer_until is not None:
return handle_deferred_message(sender, client, message_type_name,
message_to, topic_name, message_content,
delivery_type, defer_until, tz_guess,
forwarder_user_profile=user_profile,
realm=realm)
ret = check_send_message(sender, client, message_type_name, message_to,
topic_name, message_content, forged=forged,
forged_timestamp = request.POST.get('time'),
forwarder_user_profile=user_profile, realm=realm,
local_id=local_id, sender_queue_id=queue_id,
widget_content=widget_content)
return json_success({"id": ret})
def fill_edit_history_entries(message_history: List[Dict[str, Any]], message: Message) -> None:
"""This fills out the message edit history entries from the database,
which are designed to have the minimum data possible, to instead
have the current topic + content as of that time, plus data on
whatever changed. This makes it much simpler to do future
processing.
Note that this mutates what is passed to it, which is sorta a bad pattern.
"""
prev_content = message.content
prev_rendered_content = message.rendered_content
prev_topic = message.topic_name()
# Make sure that the latest entry in the history corresponds to the
# message's last edit time
if len(message_history) > 0:
assert message.last_edit_time is not None
assert(datetime_to_timestamp(message.last_edit_time) ==
message_history[0]['timestamp'])
for entry in message_history:
entry['topic'] = prev_topic
if LEGACY_PREV_TOPIC in entry:
prev_topic = entry[LEGACY_PREV_TOPIC]
entry['prev_topic'] = prev_topic
del entry[LEGACY_PREV_TOPIC]
entry['content'] = prev_content
entry['rendered_content'] = prev_rendered_content
if 'prev_content' in entry:
del entry['prev_rendered_content_version']
prev_content = entry['prev_content']
prev_rendered_content = entry['prev_rendered_content']
assert prev_rendered_content is not None
entry['content_html_diff'] = highlight_html_differences(
prev_rendered_content,
entry['rendered_content'],
message.id)
message_history.append(dict(
topic = prev_topic,
content = prev_content,
rendered_content = prev_rendered_content,
timestamp = datetime_to_timestamp(message.pub_date),
user_id = message.sender_id,
))
@has_request_variables
def get_message_edit_history(request: HttpRequest, user_profile: UserProfile,
message_id: int=REQ(converter=to_non_negative_int)) -> HttpResponse:
if not user_profile.realm.allow_edit_history:
return json_error(_("Message edit history is disabled in this organization"))
message, ignored_user_message = access_message(user_profile, message_id)
# Extract the message edit history from the message
if message.edit_history is not None:
message_edit_history = ujson.loads(message.edit_history)
else:
message_edit_history = []
# Fill in all the extra data that will make it usable
fill_edit_history_entries(message_edit_history, message)
return json_success({"message_history": reversed(message_edit_history)})
@has_request_variables
def update_message_backend(request: HttpRequest, user_profile: UserProfile,
message_id: int=REQ(converter=to_non_negative_int),
topic_name: Optional[str]=REQ_topic(),
propagate_mode: Optional[str]=REQ(default="change_one"),
content: Optional[str]=REQ(default=None)) -> HttpResponse:
if not user_profile.realm.allow_message_editing:
return json_error(_("Your organization has turned off message editing"))
message, ignored_user_message = access_message(user_profile, message_id)
is_no_topic_msg = (message.topic_name() == "(no topic)")
# You only have permission to edit a message if:
# 1. You sent it, OR:
# 2. This is a topic-only edit for a (no topic) message, OR:
# 3. This is a topic-only edit and you are an admin, OR:
# 4. This is a topic-only edit and your realm allows users to edit topics.
if message.sender == user_profile:
pass
elif (content is None) and (is_no_topic_msg or
user_profile.is_realm_admin or
user_profile.realm.allow_community_topic_editing):
pass
else:
raise JsonableError(_("You don't have permission to edit this message"))
# If there is a change to the content, check that it hasn't been too long
# Allow an extra 20 seconds since we potentially allow editing 15 seconds
# past the limit, and in case there are network issues, etc. The 15 comes
# from (min_seconds_to_edit + seconds_left_buffer) in message_edit.js; if
# you change this value also change those two parameters in message_edit.js.
edit_limit_buffer = 20
if content is not None and user_profile.realm.message_content_edit_limit_seconds > 0:
deadline_seconds = user_profile.realm.message_content_edit_limit_seconds + edit_limit_buffer
if (timezone_now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has passed"))
# If there is a change to the topic, check that the user is allowed to
# edit it and that it has not been too long. If this is not the user who
# sent the message, they are not the admin, and the time limit for editing
# topics is passed, raise an error.
if content is None and message.sender != user_profile and not user_profile.is_realm_admin and \
not is_no_topic_msg:
deadline_seconds = Realm.DEFAULT_COMMUNITY_TOPIC_EDITING_LIMIT_SECONDS + edit_limit_buffer
if (timezone_now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
raise JsonableError(_("The time limit for editing this message has passed"))
if topic_name is None and content is None:
return json_error(_("Nothing to change"))
if topic_name is not None:
topic_name = topic_name.strip()
if topic_name == "":
raise JsonableError(_("Topic can't be empty"))
rendered_content = None
links_for_embed = set() # type: Set[str]
prior_mention_user_ids = set() # type: Set[int]
mention_user_ids = set() # type: Set[int]
if content is not None:
content = content.strip()
if content == "":
content = "(deleted)"
content = truncate_body(content)
user_info = get_user_info_for_message_updates(message.id)
prior_mention_user_ids = user_info['mention_user_ids']
# We render the message using the current user's realm; since
# the cross-realm bots never edit messages, this should be
# always correct.
# Note: If rendering fails, the called code will raise a JsonableError.
rendered_content = render_incoming_message(message,
content,
user_info['message_user_ids'],
user_profile.realm)
links_for_embed |= message.links_for_preview
mention_user_ids = message.mentions_user_ids
number_changed = do_update_message(user_profile, message, topic_name,
propagate_mode, content, rendered_content,
prior_mention_user_ids,
mention_user_ids)
# Include the number of messages changed in the logs
request._log_data['extra'] = "[%s]" % (number_changed,)
if links_for_embed and bugdown.url_embed_preview_enabled(message):
event_data = {
'message_id': message.id,
'message_content': message.content,
# The choice of `user_profile.realm_id` rather than
# `sender.realm_id` must match the decision made in the
# `render_incoming_message` call earlier in this function.
'message_realm_id': user_profile.realm_id,
'urls': links_for_embed}
queue_json_publish('embed_links', event_data)
return json_success()
def validate_can_delete_message(user_profile: UserProfile, message: Message) -> None:
if user_profile.is_realm_admin:
# Admin can delete any message, any time.
return
if message.sender != user_profile:
# Users can only delete messages sent by them.
raise JsonableError(_("You don't have permission to delete this message"))
if not user_profile.realm.allow_message_deleting:
        # Users cannot delete messages if message deleting is not allowed in the realm.
raise JsonableError(_("You don't have permission to delete this message"))
deadline_seconds = user_profile.realm.message_content_delete_limit_seconds
if deadline_seconds == 0:
        # A value of 0 means there is no time limit for deleting messages.
return
if (timezone_now() - message.pub_date) > datetime.timedelta(seconds=deadline_seconds):
        # Users cannot delete messages after the realm's deadline has passed.
raise JsonableError(_("The time limit for deleting this message has passed"))
return
@has_request_variables
def delete_message_backend(request: HttpRequest, user_profile: UserProfile,
message_id: int=REQ(converter=to_non_negative_int)) -> HttpResponse:
message, ignored_user_message = access_message(user_profile, message_id)
validate_can_delete_message(user_profile, message)
try:
do_delete_messages(user_profile, [message])
except (Message.DoesNotExist, IntegrityError):
raise JsonableError(_("Message already deleted"))
return json_success()
@has_request_variables
def json_fetch_raw_message(request: HttpRequest, user_profile: UserProfile,
message_id: int=REQ(converter=to_non_negative_int)) -> HttpResponse:
(message, user_message) = access_message(user_profile, message_id)
return json_success({"raw_content": message.content})
@has_request_variables
def render_message_backend(request: HttpRequest, user_profile: UserProfile,
content: str=REQ()) -> HttpResponse:
message = Message()
message.sender = user_profile
message.content = content
message.sending_client = request.client
rendered_content = render_markdown(message, content, realm=user_profile.realm)
return json_success({"rendered": rendered_content})
@has_request_variables
def messages_in_narrow_backend(request: HttpRequest, user_profile: UserProfile,
msg_ids: List[int]=REQ(validator=check_list(check_int)),
narrow: Optional[List[Dict[str, Any]]]=REQ(converter=narrow_parameter)
) -> HttpResponse:
first_visible_message_id = get_first_visible_message_id(user_profile.realm)
msg_ids = [message_id for message_id in msg_ids if message_id >= first_visible_message_id]
# This query is limited to messages the user has access to because they
# actually received them, as reflected in `zerver_usermessage`.
query = select([column("message_id"), topic_column_sa(), column("rendered_content")],
and_(column("user_profile_id") == literal(user_profile.id),
column("message_id").in_(msg_ids)),
join(table("zerver_usermessage"), table("zerver_message"),
literal_column("zerver_usermessage.message_id") ==
literal_column("zerver_message.id")))
builder = NarrowBuilder(user_profile, column("message_id"))
if narrow is not None:
for term in narrow:
query = builder.add_term(query, term)
sa_conn = get_sqlalchemy_connection()
query_result = list(sa_conn.execute(query).fetchall())
search_fields = dict()
for row in query_result:
message_id = row['message_id']
topic_name = row[DB_TOPIC_NAME]
rendered_content = row['rendered_content']
if 'content_matches' in row:
content_matches = row['content_matches']
topic_matches = row['topic_matches']
search_fields[message_id] = get_search_fields(rendered_content, topic_name,
content_matches, topic_matches)
else:
search_fields[message_id] = {
'match_content': rendered_content,
MATCH_TOPIC: escape_html(topic_name),
}
return json_success({"messages": search_fields})
| rishig/zulip | zerver/views/messages.py | Python | apache-2.0 | 69,921 |
from plone.app.testing import PloneWithPackageLayer
from plone.app.testing import IntegrationTesting
from plone.app.testing import FunctionalTesting
import groupdocs.comparison
GROUPDOCS_COMPARISON = PloneWithPackageLayer(
zcml_package=groupdocs.comparison,
zcml_filename='testing.zcml',
gs_profile_id='groupdocs.comparison:testing',
name="GROUPDOCS_COMPARISON")
GROUPDOCS_COMPARISON_INTEGRATION = IntegrationTesting(
bases=(GROUPDOCS_COMPARISON, ),
name="GROUPDOCS_COMPARISON_INTEGRATION")
GROUPDOCS_COMPARISON_FUNCTIONAL = FunctionalTesting(
bases=(GROUPDOCS_COMPARISON, ),
name="GROUPDOCS_COMPARISON_FUNCTIONAL")
| liosha2007/plone-groupdocs-comparison-source | src/groupdocs/comparison/testing.py | Python | apache-2.0 | 653 |
"""Resets the password for virtual machine. The virtual machine must be in a "Stopped" state and the template must already support this feature for this command to take effect. [async]"""
from baseCmd import *
from baseResponse import *
class resetPasswordForVirtualMachineCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""The ID of the virtual machine"""
"""Required"""
self.id = None
self.typeInfo['id'] = 'uuid'
self.required = ["id", ]
class resetPasswordForVirtualMachineResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the ID of the virtual machine"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account associated with the virtual machine"""
self.account = None
self.typeInfo['account'] = 'string'
"""the number of cpu this virtual machine is running with"""
self.cpunumber = None
self.typeInfo['cpunumber'] = 'integer'
"""the speed of each cpu"""
self.cpuspeed = None
self.typeInfo['cpuspeed'] = 'integer'
"""the amount of the vm's CPU currently used"""
self.cpuused = None
self.typeInfo['cpuused'] = 'string'
"""the date when this virtual machine was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""Vm details in key/value pairs."""
self.details = None
self.typeInfo['details'] = 'map'
"""the read (io) of disk on the vm"""
self.diskioread = None
self.typeInfo['diskioread'] = 'long'
"""the write (io) of disk on the vm"""
self.diskiowrite = None
self.typeInfo['diskiowrite'] = 'long'
"""the read (bytes) of disk on the vm"""
self.diskkbsread = None
self.typeInfo['diskkbsread'] = 'long'
"""the write (bytes) of disk on the vm"""
self.diskkbswrite = None
self.typeInfo['diskkbswrite'] = 'long'
"""the ID of the disk offering of the virtual machine"""
self.diskofferingid = None
self.typeInfo['diskofferingid'] = 'string'
"""the name of the disk offering of the virtual machine"""
self.diskofferingname = None
self.typeInfo['diskofferingname'] = 'string'
"""user generated name. The name of the virtual machine is returned if no displayname exists."""
self.displayname = None
self.typeInfo['displayname'] = 'string'
"""an optional field whether to the display the vm to the end user or not."""
self.displayvm = None
self.typeInfo['displayvm'] = 'boolean'
"""the name of the domain in which the virtual machine exists"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the ID of the domain in which the virtual machine exists"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the virtual network for the service offering"""
self.forvirtualnetwork = None
self.typeInfo['forvirtualnetwork'] = 'boolean'
"""the group name of the virtual machine"""
self.group = None
self.typeInfo['group'] = 'string'
"""the group ID of the virtual machine"""
self.groupid = None
self.typeInfo['groupid'] = 'string'
"""Os type ID of the virtual machine"""
self.guestosid = None
self.typeInfo['guestosid'] = 'string'
"""true if high-availability is enabled, false otherwise"""
self.haenable = None
self.typeInfo['haenable'] = 'boolean'
"""the ID of the host for the virtual machine"""
self.hostid = None
self.typeInfo['hostid'] = 'string'
"""the name of the host for the virtual machine"""
self.hostname = None
self.typeInfo['hostname'] = 'string'
"""the hypervisor on which the template runs"""
self.hypervisor = None
self.typeInfo['hypervisor'] = 'string'
"""instance name of the user vm; this parameter is returned to the ROOT admin only"""
self.instancename = None
self.typeInfo['instancename'] = 'string'
"""true if vm contains XS tools inorder to support dynamic scaling of VM cpu/memory."""
self.isdynamicallyscalable = None
self.typeInfo['isdynamicallyscalable'] = 'boolean'
"""an alternate display text of the ISO attached to the virtual machine"""
self.isodisplaytext = None
self.typeInfo['isodisplaytext'] = 'string'
"""the ID of the ISO attached to the virtual machine"""
self.isoid = None
self.typeInfo['isoid'] = 'string'
"""the name of the ISO attached to the virtual machine"""
self.isoname = None
self.typeInfo['isoname'] = 'string'
"""ssh key-pair"""
self.keypair = None
self.typeInfo['keypair'] = 'string'
"""the memory allocated for the virtual machine"""
self.memory = None
self.typeInfo['memory'] = 'integer'
"""the name of the virtual machine"""
self.name = None
self.typeInfo['name'] = 'string'
"""the incoming network traffic on the vm"""
self.networkkbsread = None
self.typeInfo['networkkbsread'] = 'long'
"""the outgoing network traffic on the host"""
self.networkkbswrite = None
self.typeInfo['networkkbswrite'] = 'long'
"""OS type id of the vm"""
self.ostypeid = None
self.typeInfo['ostypeid'] = 'long'
"""the password (if exists) of the virtual machine"""
self.password = None
self.typeInfo['password'] = 'string'
"""true if the password rest feature is enabled, false otherwise"""
self.passwordenabled = None
self.typeInfo['passwordenabled'] = 'boolean'
"""the project name of the vm"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the vm"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicip = None
self.typeInfo['publicip'] = 'string'
"""public IP address id associated with vm via Static nat rule"""
self.publicipid = None
self.typeInfo['publicipid'] = 'string'
"""device ID of the root volume"""
self.rootdeviceid = None
self.typeInfo['rootdeviceid'] = 'long'
"""device type of the root volume"""
self.rootdevicetype = None
self.typeInfo['rootdevicetype'] = 'string'
"""the ID of the service offering of the virtual machine"""
self.serviceofferingid = None
self.typeInfo['serviceofferingid'] = 'string'
"""the name of the service offering of the virtual machine"""
self.serviceofferingname = None
self.typeInfo['serviceofferingname'] = 'string'
"""State of the Service from LB rule"""
self.servicestate = None
self.typeInfo['servicestate'] = 'string'
"""the state of the virtual machine"""
self.state = None
self.typeInfo['state'] = 'string'
"""an alternate display text of the template for the virtual machine"""
self.templatedisplaytext = None
self.typeInfo['templatedisplaytext'] = 'string'
"""the ID of the template for the virtual machine. A -1 is returned if the virtual machine was created from an ISO file."""
self.templateid = None
self.typeInfo['templateid'] = 'string'
"""the name of the template for the virtual machine"""
self.templatename = None
self.typeInfo['templatename'] = 'string'
"""the user's ID who deployed the virtual machine"""
self.userid = None
self.typeInfo['userid'] = 'string'
"""the user's name who deployed the virtual machine"""
self.username = None
self.typeInfo['username'] = 'string'
"""the vgpu type used by the virtual machine"""
self.vgpu = None
self.typeInfo['vgpu'] = 'string'
"""the ID of the availablility zone for the virtual machine"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the availability zone for the virtual machine"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""list of affinity groups associated with the virtual machine"""
self.affinitygroup = []
"""the list of nics associated with vm"""
self.nic = []
"""list of security groups associated with the virtual machine"""
self.securitygroup = []
"""the list of resource tags associated with vm"""
self.tags = []
"""the ID of the latest async job acting on this object"""
self.jobid = None
self.typeInfo['jobid'] = ''
"""the current status of the latest async job acting on this object"""
self.jobstatus = None
self.typeInfo['jobstatus'] = ''
class affinitygroup:
def __init__(self):
""""the ID of the affinity group"""
self.id = None
""""the account owning the affinity group"""
self.account = None
""""the description of the affinity group"""
self.description = None
""""the domain name of the affinity group"""
self.domain = None
""""the domain ID of the affinity group"""
self.domainid = None
""""the name of the affinity group"""
self.name = None
""""the project name of the affinity group"""
self.project = None
""""the project ID of the affinity group"""
self.projectid = None
""""the type of the affinity group"""
self.type = None
""""virtual machine IDs associated with this affinity group"""
self.virtualmachineIds = None
class nic:
def __init__(self):
""""the ID of the nic"""
self.id = None
""""the broadcast uri of the nic"""
self.broadcasturi = None
""""device id for the network when plugged into the virtual machine"""
self.deviceid = None
""""the gateway of the nic"""
self.gateway = None
""""the IPv6 address of network"""
self.ip6address = None
""""the cidr of IPv6 network"""
self.ip6cidr = None
""""the gateway of IPv6 network"""
self.ip6gateway = None
""""the ip address of the nic"""
self.ipaddress = None
""""true if nic is default, false otherwise"""
self.isdefault = None
""""the isolation uri of the nic"""
self.isolationuri = None
""""true if nic is default, false otherwise"""
self.macaddress = None
""""the netmask of the nic"""
self.netmask = None
""""the ID of the corresponding network"""
self.networkid = None
""""the name of the corresponding network"""
self.networkname = None
""""the Secondary ipv4 addr of nic"""
self.secondaryip = None
""""the traffic type of the nic"""
self.traffictype = None
""""the type of the nic"""
self.type = None
""""Id of the vm to which the nic belongs"""
self.virtualmachineid = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class egressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class ingressrule:
def __init__(self):
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
class securitygroup:
def __init__(self):
""""the ID of the security group"""
self.id = None
""""the account owning the security group"""
self.account = None
""""the description of the security group"""
self.description = None
""""the domain name of the security group"""
self.domain = None
""""the domain ID of the security group"""
self.domainid = None
""""the name of the security group"""
self.name = None
""""the project name of the group"""
self.project = None
""""the project id of the group"""
self.projectid = None
""""the number of virtualmachines associated with this securitygroup"""
self.virtualmachinecount = None
""""the list of virtualmachine ids associated with this securitygroup"""
self.virtualmachineids = None
""""the list of egress rules associated with the security group"""
self.egressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of ingress rules associated with the security group"""
self.ingressrule = []
""""account owning the security group rule"""
self.account = None
""""the CIDR notation for the base IP address of the security group rule"""
self.cidr = None
""""the ending IP of the security group rule"""
self.endport = None
""""the code for the ICMP message response"""
self.icmpcode = None
""""the type of the ICMP message response"""
self.icmptype = None
""""the protocol of the security group rule"""
self.protocol = None
""""the id of the security group rule"""
self.ruleid = None
""""security group name"""
self.securitygroupname = None
""""the starting IP of the security group rule"""
self.startport = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the list of resource tags associated with the rule"""
self.tags = []
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
""""the ID of the latest async job acting on this object"""
self.jobid = None
""""the current status of the latest async job acting on this object"""
self.jobstatus = None
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
| MissionCriticalCloud/marvin | marvin/cloudstackAPI/resetPasswordForVirtualMachine.py | Python | apache-2.0 | 24,253 |
from .base_backend import BaseBackend
class MlpBackend(BaseBackend):
def __init__(self, inpmulti, hidmulti, outmulti, learning_rate, inp96, hid96, out96, path, buffsize, mean, std, statspath):
from neupre.misc.builders import build_model_mlp
super(MlpBackend, self).__init__(int(buffsize))
self.model_multistep = build_model_mlp(inpmulti, hidmulti, outmulti)
self.model_onestep96 = build_model_mlp(inp96, hid96, out96)
self.initialize(False, path, mean, std, statspath)
def train(self):
log2 = self.model_multistep.fit(self.X_train_multistep, self.y_train_multistep, batch_size=10, nb_epoch=2,
validation_split=0.1, verbose=1)
log3 = self.model_onestep96.fit(self.X_train_onestep96, self.y_train_onestep96, batch_size=10, nb_epoch=2,
validation_split=0.1, verbose=1)
return [log2, log3]
def predict(self, X_test_multistep, X_test_onestep96):
p2 = self.model_multistep.predict(X_test_multistep)
p3 = self.model_onestep96.predict(X_test_onestep96)
return [p2, p3]
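# Illustrative sketch (not part of the original module): constructing the
# backend and running one train/predict cycle. Every argument value below is
# an assumption; the real layer sizes, paths and statistics depend on the
# deployment.
#
#     backend = MlpBackend(inpmulti=96, hidmulti=50, outmulti=96,
#                          learning_rate=0.01, inp96=96, hid96=50, out96=1,
#                          path='measurements.csv', buffsize=960,
#                          mean=0.0, std=1.0, statspath='stats.npz')
#     logs = backend.train()
#     multi_pred, one_pred = backend.predict(X_test_multistep, X_test_onestep96)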
| horvathpeter/neural_prediction | neupre/backend/onlinemlp_backend.py | Python | apache-2.0 | 1,147 |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock
import uuid
from rally.deploy.engines import devstack
from rally.openstack.common import test
SAMPLE_CONFIG = {
'name': 'DevstackEngine',
'provider': {
'name': 'ExistingServers',
'credentials': [{'user': 'root', 'host': 'example.com'}],
},
'localrc': {
'ADMIN_PASSWORD': 'secret',
},
}
DEVSTACK_REPO = 'https://github.com/openstack-dev/devstack.git'
class DevstackEngineTestCase(test.BaseTestCase):
def setUp(self):
super(DevstackEngineTestCase, self).setUp()
self.deployment = {
'uuid': str(uuid.uuid4()),
'config': SAMPLE_CONFIG,
}
self.engine = devstack.DevstackEngine(self.deployment)
def test_invalid_config(self):
self.deployment = SAMPLE_CONFIG.copy()
self.deployment['config'] = {'name': 42}
self.assertRaises(jsonschema.ValidationError,
devstack.DevstackEngine, self.deployment)
def test_construct(self):
self.assertEqual(self.engine.localrc['ADMIN_PASSWORD'], 'secret')
@mock.patch('rally.deploy.engines.devstack.open', create=True)
def test_prepare_server(self, m_open):
m_open.return_value = 'fake_file'
server = mock.Mock()
server.password = 'secret'
self.engine.prepare_server(server)
calls = [
mock.call('/bin/sh -e', stdin='fake_file'),
mock.call('chpasswd', stdin='rally:secret'),
]
self.assertEqual(calls, server.ssh.run.mock_calls)
filename = m_open.mock_calls[0][1][0]
self.assertTrue(filename.endswith('rally/deploy/engines/'
'devstack/install.sh'))
self.assertEqual([mock.call(filename, 'rb')], m_open.mock_calls)
@mock.patch('rally.deploy.engine.EngineFactory.get_provider')
@mock.patch('rally.deploy.engines.devstack.get_updated_server')
@mock.patch('rally.deploy.engines.devstack.get_script')
@mock.patch('rally.deploy.serverprovider.provider.Server')
@mock.patch('rally.deploy.engines.devstack.objects.Endpoint')
def test_deploy(self, m_endpoint, m_server, m_gs, m_gus, m_gp):
m_gp.return_value = fake_provider = mock.Mock()
server = mock.Mock()
server.host = 'host'
m_endpoint.return_value = 'fake_endpoint'
m_gus.return_value = ds_server = mock.Mock()
m_gs.return_value = 'fake_script'
server.get_credentials.return_value = 'fake_credentials'
fake_provider.create_servers.return_value = [server]
with mock.patch.object(self.engine, 'deployment') as m_d:
endpoints = self.engine.deploy()
self.assertEqual(['fake_endpoint'], endpoints)
m_endpoint.assert_called_once_with('http://host:5000/v2.0/', 'admin',
'secret', 'admin', 'admin')
m_d.add_resource.assert_called_once_with(
info='fake_credentials',
provider_name='DevstackEngine',
type='credentials')
repo = 'https://github.com/openstack-dev/devstack.git'
cmd = '/bin/sh -e -s %s master' % repo
server.ssh.run.assert_called_once_with(cmd, stdin='fake_script')
ds_calls = [
mock.call.ssh.run('cat > ~/devstack/localrc', stdin=mock.ANY),
mock.call.ssh.run('~/devstack/stack.sh')
]
self.assertEqual(ds_calls, ds_server.mock_calls)
localrc = ds_server.mock_calls[0][2]['stdin']
self.assertIn('ADMIN_PASSWORD=secret', localrc)
| ytsarev/rally | tests/deploy/engines/test_devstack.py | Python | apache-2.0 | 4,266 |
'''
@author: Team Alpha, <aa5186@nyu.edu>
Name: Customer Model
Purpose: This library is part of the customer REST API for the ecommerce website
'''
from customer import Customer
| devops-alpha-s17/customers | customers/__init__.py | Python | apache-2.0 | 184 |
__author__ = 'yinjun'
class Solution:
"""
@param nums: The integer array
@return: The length of LIS (longest increasing subsequence)
"""
def longestIncreasingSubsequence(self, nums):
# write your code here
if nums is None or nums == []:
return 0
l = len(nums)
length = [0 for i in range(l)]
maxLength = 0
for i in range(l):
length[i] = 1
for j in range(0, i):
if nums[j] <= nums[i]:
length[i] = max(length[i], length[j] + 1)
maxLength = max(maxLength, length[i])
return maxLength | shootsoft/practice | lintcode/NineChapters/04/longest-increasing-subsequence.py | Python | apache-2.0 | 646 |
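# Illustrative usage of the Solution class above (not part of the original
# file). Note the comparison nums[j] <= nums[i] makes the run non-decreasing:
#
#     s = Solution()
#     assert s.longestIncreasingSubsequence([5, 4, 1, 2, 3]) == 3     # [1, 2, 3]
#     assert s.longestIncreasingSubsequence([4, 2, 4, 5, 3, 7]) == 4  # [2, 4, 5, 7]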
from distutils.core import setup, Extension
from distutils.sysconfig import get_python_lib
import os, os.path
import sys
if 'sdist' in sys.argv and sys.platform != "win32":
assert os.system("git show-ref -s HEAD > .gitrev") == 0
if sys.platform == "darwin":
# Don't create resource files on OS X tar.
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
setup_args = {}
def add_command_class(name, cls):
cmdclasses = setup_args.get('cmdclass', {})
cmdclasses[name] = cls
setup_args['cmdclass'] = cmdclasses
if sys.version_info[0] >= 3:
import lib2to3.refactor
from distutils.command.build_py \
import build_py_2to3 as build_py
# need to convert sources to Py3 on installation
fixers = [ fix for fix in lib2to3.refactor.get_fixers_from_package("lib2to3.fixes")
if fix.split('fix_')[-1] not in ('next',)
]
build_py.fixer_names = fixers
add_command_class("build_py", build_py)
pxd_include_dirs = [
directory for directory, dirs, files in os.walk('Cython/Includes')
if '__init__.pyx' in files or '__init__.pxd' in files
or directory == 'Cython/Includes' or directory == 'Cython/Includes/Deprecated']
pxd_include_patterns = [
p+'/*.pxd' for p in pxd_include_dirs ] + [
p+'/*.pyx' for p in pxd_include_dirs ]
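# Illustrative sketch (not part of the original script): if os.walk finds a
# package directory such as 'Cython/Includes/libc' containing '__init__.pxd',
# the two pattern lists above contribute 'Cython/Includes/libc/*.pxd' and
# 'Cython/Includes/libc/*.pyx'.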
if sys.version_info < (2,4):
install_base_dir = get_python_lib(prefix='')
import glob
patterns = pxd_include_patterns + [
'Cython/Plex/*.pxd',
'Cython/Compiler/*.pxd',
'Cython/Runtime/*.pyx'
]
setup_args['data_files'] = [
(os.path.dirname(os.path.join(install_base_dir, pattern)),
[ f for f in glob.glob(pattern) ])
for pattern in patterns
]
else:
setup_args['package_data'] = {
'Cython.Plex' : ['*.pxd'],
'Cython.Compiler' : ['*.pxd'],
'Cython.Runtime' : ['*.pyx', '*.pxd'],
'Cython' : [ p[7:] for p in pxd_include_patterns ],
}
# This dict is used for passing extra arguments that are setuptools
# specific to setup
setuptools_extra_args = {}
# tells whether to include cygdb (the script and the Cython.Debugger package)
include_debugger = sys.version_info[:2] > (2, 5)
if 'setuptools' in sys.modules:
setuptools_extra_args['zip_safe'] = False
setuptools_extra_args['entry_points'] = {
'console_scripts': [
'cython = Cython.Compiler.Main:setuptools_main',
]
}
scripts = []
else:
if os.name == "posix":
scripts = ["bin/cython"]
if include_debugger:
scripts.append('bin/cygdb')
else:
scripts = ["cython.py"]
if include_debugger:
scripts.append('cygdb.py')
def compile_cython_modules(profile=False, compile_more=False, cython_with_refnanny=False):
source_root = os.path.abspath(os.path.dirname(__file__))
compiled_modules = ["Cython.Plex.Scanners",
"Cython.Plex.Actions",
"Cython.Compiler.Lexicon",
"Cython.Compiler.Scanning",
"Cython.Compiler.Parsing",
"Cython.Compiler.Visitor",
"Cython.Compiler.Code",
"Cython.Runtime.refnanny",]
if compile_more:
compiled_modules.extend([
"Cython.Compiler.ParseTreeTransforms",
"Cython.Compiler.Nodes",
"Cython.Compiler.ExprNodes",
"Cython.Compiler.ModuleNode",
"Cython.Compiler.Optimize",
])
defines = []
if cython_with_refnanny:
defines.append(('CYTHON_REFNANNY', '1'))
extensions = []
if sys.version_info[0] >= 3:
from Cython.Distutils import build_ext as build_ext_orig
for module in compiled_modules:
source_file = os.path.join(source_root, *module.split('.'))
if os.path.exists(source_file + ".py"):
pyx_source_file = source_file + ".py"
else:
pyx_source_file = source_file + ".pyx"
dep_files = []
if os.path.exists(source_file + '.pxd'):
dep_files.append(source_file + '.pxd')
if '.refnanny' in module:
defines_for_module = []
else:
defines_for_module = defines
extensions.append(
Extension(module, sources = [pyx_source_file],
define_macros = defines_for_module,
depends = dep_files)
)
class build_ext(build_ext_orig):
# we must keep the original modules alive to make sure
# their code keeps working when we remove them from
# sys.modules
dead_modules = []
def build_extensions(self):
# add path where 2to3 installed the transformed sources
# and make sure Python (re-)imports them from there
already_imported = [ module for module in sys.modules
if module == 'Cython' or module.startswith('Cython.') ]
keep_alive = self.dead_modules.append
for module in already_imported:
keep_alive(sys.modules[module])
del sys.modules[module]
sys.path.insert(0, os.path.join(source_root, self.build_lib))
if profile:
from Cython.Compiler.Options import directive_defaults
directive_defaults['profile'] = True
print("Enabled profiling for the Cython binary modules")
build_ext_orig.build_extensions(self)
setup_args['ext_modules'] = extensions
add_command_class("build_ext", build_ext)
else: # Python 2.x
from distutils.command.build_ext import build_ext as build_ext_orig
try:
class build_ext(build_ext_orig):
def build_extension(self, ext, *args, **kargs):
try:
build_ext_orig.build_extension(self, ext, *args, **kargs)
except StandardError:
print("Compilation of '%s' failed" % ext.sources[0])
from Cython.Compiler.Main import compile
from Cython import Utils
if profile:
from Cython.Compiler.Options import directive_defaults
directive_defaults['profile'] = True
print("Enabled profiling for the Cython binary modules")
source_root = os.path.dirname(__file__)
for module in compiled_modules:
source_file = os.path.join(source_root, *module.split('.'))
if os.path.exists(source_file + ".py"):
pyx_source_file = source_file + ".py"
else:
pyx_source_file = source_file + ".pyx"
c_source_file = source_file + ".c"
source_is_newer = False
if not os.path.exists(c_source_file):
source_is_newer = True
else:
c_last_modified = Utils.modification_time(c_source_file)
if Utils.file_newer_than(pyx_source_file, c_last_modified):
source_is_newer = True
else:
pxd_source_file = source_file + ".pxd"
if os.path.exists(pxd_source_file) and Utils.file_newer_than(pxd_source_file, c_last_modified):
source_is_newer = True
if source_is_newer:
print("Compiling module %s ..." % module)
result = compile(pyx_source_file)
c_source_file = result.c_file
if c_source_file:
# Py2 distutils can't handle unicode file paths
if isinstance(c_source_file, unicode):
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
c_source_file = c_source_file.encode(filename_encoding)
if '.refnanny' in module:
defines_for_module = []
else:
defines_for_module = defines
extensions.append(
Extension(module, sources = [c_source_file],
define_macros = defines_for_module)
)
else:
print("Compilation failed")
if extensions:
setup_args['ext_modules'] = extensions
add_command_class("build_ext", build_ext)
except Exception:
print('''
ERROR: %s
Extension module compilation failed, looks like Cython cannot run
properly on this system. To work around this, pass the option
"--no-cython-compile". This will install a pure Python version of
Cython without compiling its own sources.
''' % sys.exc_info()[1])
raise
cython_profile = '--cython-profile' in sys.argv
if cython_profile:
sys.argv.remove('--cython-profile')
try:
sys.argv.remove("--cython-compile-all")
cython_compile_more = True
except ValueError:
cython_compile_more = False
try:
sys.argv.remove("--cython-with-refnanny")
cython_with_refnanny = True
except ValueError:
cython_with_refnanny = False
try:
sys.argv.remove("--no-cython-compile")
except ValueError:
compile_cython_modules(cython_profile, cython_compile_more, cython_with_refnanny)
setup_args.update(setuptools_extra_args)
from Cython import __version__ as version
packages = [
'Cython',
'Cython.Build',
'Cython.Compiler',
'Cython.Runtime',
'Cython.Distutils',
'Cython.Plex',
'Cython.Tests',
'Cython.Build.Tests',
'Cython.Compiler.Tests',
]
if include_debugger:
packages.append('Cython.Debugger')
packages.append('Cython.Debugger.Tests')
# it's enough to do this for Py2.5+:
setup_args['package_data']['Cython.Debugger.Tests'] = ['codefile', 'cfuncs.c']
setup(
name = 'Cython',
version = version,
url = 'http://www.cython.org',
author = 'Greg Ewing, Robert Bradshaw, Stefan Behnel, Dag Seljebotn, et al.',
author_email = 'cython-dev@codespeak.net',
description = "The Cython compiler for writing C extensions for the Python language.",
long_description = """\
The Cython language makes writing C extensions for the Python language as
easy as Python itself. Cython is a source code translator based on the
well-known Pyrex_, but supports more cutting edge functionality and
optimizations.
The Cython language is very close to the Python language (and most Python
code is also valid Cython code), but Cython additionally supports calling C
functions and declaring C types on variables and class attributes. This
allows the compiler to generate very efficient C code from Cython code.
This makes Cython the ideal language for writing glue code for external C
libraries, and for fast C modules that speed up the execution of Python
code.
.. _Pyrex: http://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/
""",
classifiers = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: C",
"Programming Language :: Cython",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Compilers",
"Topic :: Software Development :: Libraries :: Python Modules"
],
scripts = scripts,
packages=packages,
# pyximport
py_modules = ["pyximport/__init__",
"pyximport/pyximport",
"pyximport/pyxbuild",
"cython"],
**setup_args
)
| bzzzz/cython | setup.py | Python | apache-2.0 | 12,175 |
from math import fabs
"""Kept these functions outside the class, since they are static
for the search and movement functions for board. The downside is it creates
an object for search purposes, which seems relatively heavy. I'll
optimize later if necessary
"""
def shift_up(pos):
"""returns new position that has shifted up"""
return Position(pos.x, pos.y + 1)
def shift_down(pos):
"""returns new position that has shifted down"""
return Position(pos.x, pos.y - 1)
def shift_right(pos):
"""returns new position that has shifted right"""
return Position(pos.x + 1, pos.y)
def shift_left(pos):
"""returns new position that has shifted left"""
return Position(pos.x - 1, pos.y)
def shift_up_left(pos):
    """returns new position that has shifted up and to the left"""
    return Position(pos.x - 1, pos.y + 1)
def shift_down_left(pos):
    """returns new position that has shifted down and to the left"""
    return Position(pos.x - 1, pos.y - 1)
def shift_up_right(pos):
    """returns new position that has shifted up and to the right"""
    return Position(pos.x + 1, pos.y + 1)
def shift_down_right(pos):
    """returns new position that has shifted down and to the right"""
    return Position(pos.x + 1, pos.y - 1)
class Position(object):
def __init__(self, x, y):
self._x, self._y = x, y
# TODO: test the speed of this implementation
# def __cmp__(self, other):
# if (self.width != other.width):
# return cmp(self.width, other.width)
# return cmp(self.height, other.height)
def __eq__(self, pos):
return self._x == pos.x and self._y == pos.y
def __ne__(self, pos):
return self._x != pos.x or self._y != pos.y
def __hash__(self):
return hash(('x', self._x, 'y', self._y))
def __repr__(self):
return '({0},{1})'.format(self._x, self._y)
def __str__(self):
return '({0},{1})'.format(self._x, self._y)
# ##################### Accessors/Modifiers ###############################
@property
def x(self):
return self._x
@property
def y(self):
return self._y
# ############################### Discovery ###############################
def is_diagonal(self, pos):
"""Verify if points are diagonal"""
return fabs(self.x - pos.x) == fabs(self.y - pos.y)
def is_parallel(self, pos):
"""Verify if points are parallel"""
return self.x == pos.x or self.y == pos.y
    def is_adj(self, pos):
        """Verify if points are adjacent.
        Adjacent means: parallel on the x plane and exactly one square
        apart on y, parallel on the y plane and exactly one square apart
        on x, or diagonal and exactly one square apart on either axis.
        """
        return (self.x == pos.x and fabs(self.y - pos.y) == 1) \
            or (self.y == pos.y and fabs(self.x - pos.x) == 1) \
            or (self.is_diagonal(pos) and fabs(self.y - pos.y) == 1) \
            or (self.is_diagonal(pos) and fabs(self.x - pos.x) == 1)
def to_json(self):
return {'x': self.x, 'y': self.y}
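# Illustrative sketch (not part of the original module): composing the shift
# helpers with the adjacency checks.
#
#     origin = Position(0, 0)
#     corner = shift_up_right(origin)               # (1,1)
#     assert origin.is_diagonal(corner)
#     assert origin.is_adj(corner)
#     assert origin.is_parallel(shift_up(origin))   # same x coordinate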
| aelkikhia/pyduel_engine | pyduel_engine/model/position.py | Python | apache-2.0 | 3,110 |
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import itertools
import math
import os
import re
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import excutils
from cinder.brick import exception
from cinder.brick import executor
from cinder.i18n import _LE, _LI
from cinder import utils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
"""LVM object to enable various LVM related operations."""
LVM_CMD_PREFIX = ['env', 'LC_ALL=C']
def __init__(self, vg_name, root_helper, create_vg=False,
physical_volumes=None, lvm_type='default',
executor=putils.execute, lvm_conf=None):
"""Initialize the LVM object.
The LVM object is based on an LVM VolumeGroup, one instantiation
for each VolumeGroup you have/use.
:param vg_name: Name of existing VG or VG to create
:param root_helper: Execution root_helper method to use
:param create_vg: Indicates the VG doesn't exist
and we want to create it
:param physical_volumes: List of PVs to build VG on
:param lvm_type: VG and Volume type (default, or thin)
:param executor: Execute method to use, None uses common/processutils
"""
super(LVM, self).__init__(execute=executor, root_helper=root_helper)
self.vg_name = vg_name
self.pv_list = []
self.vg_size = 0.0
self.vg_free_space = 0.0
self.vg_lv_count = 0
self.vg_uuid = None
self.vg_thin_pool = None
self.vg_thin_pool_size = 0.0
self.vg_thin_pool_free_space = 0.0
self._supports_snapshot_lv_activation = None
self._supports_lvchange_ignoreskipactivation = None
self.vg_provisioned_capacity = 0.0
if create_vg and physical_volumes is not None:
self.pv_list = physical_volumes
try:
self._create_vg(physical_volumes)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume Group'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
if self._vg_exists() is False:
LOG.error(_LE('Unable to locate Volume Group %s'), vg_name)
raise exception.VolumeGroupNotFound(vg_name=vg_name)
# NOTE: we assume that the VG has been activated outside of Cinder
if lvm_type == 'thin':
pool_name = "%s-pool" % self.vg_name
if self.get_volume(pool_name) is None:
try:
self.create_thin_pool(pool_name)
except putils.ProcessExecutionError:
# Maybe we just lost the race against another copy of
# this driver being in init in parallel - e.g.
# cinder-volume and cinder-backup starting in parallel
if self.get_volume(pool_name) is None:
raise
self.vg_thin_pool = pool_name
self.activate_lv(self.vg_thin_pool)
self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
if lvm_conf and os.path.isfile(lvm_conf):
LVM.LVM_CMD_PREFIX = ['env',
'LC_ALL=C',
'LVM_SYSTEM_DIR=/etc/cinder']
def _vg_exists(self):
"""Simple check to see if VG exists.
:returns: True if vg specified in object exists, else False
"""
exists = False
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'name', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
volume_groups = out.split()
if self.vg_name in volume_groups:
exists = True
return exists
def _create_vg(self, pv_list):
cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
def _get_vg_uuid(self):
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'-o', 'uuid', self.vg_name]
(out, _err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
return out.split()
else:
return []
def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
"""Returns available thin pool free space.
:param vg_name: the vg where the pool is placed
:param thin_pool_name: the thin pool to gather info for
:returns: Free space in GB (float), calculated using data_percent
"""
cmd = LVM.LVM_CMD_PREFIX +\
['lvs', '--noheadings', '--unit=g',
'-o', 'size,data_percent', '--separator',
':', '--nosuffix']
# NOTE(gfidente): data_percent only applies to some types of LV so we
# make sure to append the actual thin pool name
cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
free_space = 0.0
try:
(out, err) = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out is not None:
out = out.strip()
data = out.split(':')
pool_size = float(data[0])
data_percent = float(data[1])
consumed_space = pool_size / 100 * data_percent
free_space = pool_size - consumed_space
free_space = round(free_space, 2)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error querying thin pool about data_percent'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
return free_space
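    # Illustrative arithmetic (not part of the original module): if the lvs
    # output for the pool is "9.51:30.00" (size:data_percent), then
    # consumed_space = 9.51 / 100 * 30.00 = 2.853 and
    # free_space = round(9.51 - 2.853, 2) = 6.66 (GB).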
@staticmethod
def get_lvm_version(root_helper):
"""Static method to get LVM version from system.
:param root_helper: root_helper to use for execute
:returns: version 3-tuple
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
lines = out.split('\n')
for line in lines:
if 'LVM version' in line:
version_list = line.split()
# NOTE(gfidente): version is formatted as follows:
# major.minor.patchlevel(library API version)[-customisation]
version = version_list[2]
version_filter = r"(\d+)\.(\d+)\.(\d+).*"
r = re.search(version_filter, version)
version_tuple = tuple(map(int, r.group(1, 2, 3)))
return version_tuple
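    # Illustrative sketch (not part of the original module): for a line such
    # as "LVM version:     2.02.98(2) (2012-10-15)", version becomes
    # "2.02.98(2)" and the regex r"(\d+)\.(\d+)\.(\d+).*" yields the
    # tuple (2, 2, 98).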
@staticmethod
def supports_thin_provisioning(root_helper):
"""Static method to check for thin LVM support on a system.
:param root_helper: root_helper to use for execute
:returns: True if supported, False otherwise
"""
return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
@property
def supports_snapshot_lv_activation(self):
"""Property indicating whether snap activation changes are supported.
Check for LVM version >= 2.02.91.
(LVM2 git: e8a40f6 Allow to activate snapshot)
:returns: True/False indicating support
"""
if self._supports_snapshot_lv_activation is not None:
return self._supports_snapshot_lv_activation
self._supports_snapshot_lv_activation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 91))
return self._supports_snapshot_lv_activation
@property
def supports_lvchange_ignoreskipactivation(self):
"""Property indicating whether lvchange can ignore skip activation.
Check for LVM version >= 2.02.99.
(LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
"""
if self._supports_lvchange_ignoreskipactivation is not None:
return self._supports_lvchange_ignoreskipactivation
self._supports_lvchange_ignoreskipactivation = (
self.get_lvm_version(self._root_helper) >= (2, 2, 99))
return self._supports_lvchange_ignoreskipactivation
@staticmethod
def get_lv_info(root_helper, vg_name=None, lv_name=None):
"""Retrieve info about LVs (all, in a VG, or a single LV).
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:param lv_name: optional, gathers info for only the specified LV
:returns: List of Dictionaries with LV info
"""
cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g',
'-o', 'vg_name,name,size', '--nosuffix']
if lv_name is not None and vg_name is not None:
cmd.append("%s/%s" % (vg_name, lv_name))
elif vg_name is not None:
cmd.append(vg_name)
try:
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception(reraise=True) as ctx:
if "not found" in err.stderr:
ctx.reraise = False
LOG.info(_LI("'Not found' when querying LVM info. "
"(vg_name=%(vg)s, lv_name=%(lv)s"),
{'vg': vg_name, 'lv': lv_name})
out = None
lv_list = []
if out is not None:
volumes = out.split()
for vg, name, size in itertools.izip(*[iter(volumes)] * 3):
lv_list.append({"vg": vg, "name": name, "size": size})
return lv_list
def get_volumes(self, lv_name=None):
"""Get all LV's associated with this instantiation (VG).
:returns: List of Dictionaries with LV info
"""
return self.get_lv_info(self._root_helper,
self.vg_name,
lv_name)
def get_volume(self, name):
"""Get reference object of volume specified by name.
:returns: dict representation of Logical Volume if exists
"""
ref_list = self.get_volumes(name)
for r in ref_list:
if r['name'] == name:
return r
return None
@staticmethod
def get_all_physical_volumes(root_helper, vg_name=None):
"""Static method to get all PVs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with PV info
"""
field_sep = '|'
cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings',
'--unit=g',
'-o', 'vg_name,name,size,free',
'--separator', field_sep,
'--nosuffix']
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
pvs = out.split()
if vg_name is not None:
pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]]
pv_list = []
for pv in pvs:
fields = pv.split(field_sep)
pv_list.append({'vg': fields[0],
'name': fields[1],
'size': float(fields[2]),
'available': float(fields[3])})
return pv_list
def get_physical_volumes(self):
"""Get all PVs associated with this instantiation (VG).
:returns: List of Dictionaries with PV info
"""
self.pv_list = self.get_all_physical_volumes(self._root_helper,
self.vg_name)
return self.pv_list
@staticmethod
def get_all_volume_groups(root_helper, vg_name=None):
"""Static method to get all VGs on a system.
:param root_helper: root_helper to use for execute
:param vg_name: optional, gathers info for only the specified VG
:returns: List of Dictionaries with VG info
"""
cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings',
'--unit=g', '-o',
'name,size,free,lv_count,uuid',
'--separator', ':',
'--nosuffix']
if vg_name is not None:
cmd.append(vg_name)
(out, _err) = putils.execute(*cmd,
root_helper=root_helper,
run_as_root=True)
vg_list = []
if out is not None:
vgs = out.split()
for vg in vgs:
fields = vg.split(':')
vg_list.append({'name': fields[0],
'size': float(fields[1]),
'available': float(fields[2]),
'lv_count': int(fields[3]),
'uuid': fields[4]})
return vg_list
def update_volume_group_info(self):
"""Update VG info for this instantiation.
Used to update member fields of object and
provide a dict of info for caller.
:returns: Dictionaries of VG info
"""
vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
if len(vg_list) != 1:
LOG.error(_LE('Unable to find VG: %s'), self.vg_name)
raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
self.vg_size = float(vg_list[0]['size'])
self.vg_free_space = float(vg_list[0]['available'])
self.vg_lv_count = int(vg_list[0]['lv_count'])
self.vg_uuid = vg_list[0]['uuid']
total_vols_size = 0.0
if self.vg_thin_pool is not None:
# NOTE(xyang): If providing only self.vg_name,
# get_lv_info will output info on the thin pool and all
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg
# stack-vg stack-pool 9.51
# stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00
# stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00
# stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00
#
# If providing both self.vg_name and self.vg_thin_pool,
# get_lv_info will output only info on the thin pool, but not
# individual volumes.
# get_lv_info(self._root_helper, 'stack-vg', 'stack-pool')
# sudo lvs --noheadings --unit=g -o vg_name,name,size
# --nosuffix stack-vg/stack-pool
# stack-vg stack-pool 9.51
#
# We need info on both the thin pool and the volumes,
# therefore we should provide only self.vg_name, but not
# self.vg_thin_pool here.
for lv in self.get_lv_info(self._root_helper,
self.vg_name):
lvsize = lv['size']
# get_lv_info runs "lvs" command with "--nosuffix".
# This removes "g" from "1.00g" and only outputs "1.00".
# Running "lvs" command without "--nosuffix" will output
# "1.00g" if "g" is the unit.
# Remove the unit if it is in lv['size'].
if not lv['size'][-1].isdigit():
lvsize = lvsize[:-1]
if lv['name'] == self.vg_thin_pool:
self.vg_thin_pool_size = lvsize
tpfs = self._get_thin_pool_free_space(self.vg_name,
self.vg_thin_pool)
self.vg_thin_pool_free_space = tpfs
else:
total_vols_size = total_vols_size + float(lvsize)
total_vols_size = round(total_vols_size, 2)
self.vg_provisioned_capacity = total_vols_size
def _calculate_thin_pool_size(self):
"""Calculates the correct size for a thin pool.
Ideally we would use 100% of the containing volume group and be done.
But the 100%VG notation to lvcreate is not implemented and thus cannot
be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
Further, some amount of free space must remain in the volume group for
metadata for the contained logical volumes. The exact amount depends
on how much volume sharing you expect.
:returns: An lvcreate-ready string for the number of calculated bytes.
"""
# make sure volume group information is current
self.update_volume_group_info()
# leave 5% free for metadata
return "%sg" % (self.vg_free_space * 0.95)
def create_thin_pool(self, name=None, size_str=None):
"""Creates a thin provisioning pool for this VG.
The syntax here is slightly different than the default
lvcreate -T, so we'll just write a custom cmd here
and do it.
:param name: Name to use for pool, default is "<vg-name>-pool"
:param size_str: Size to allocate for pool, default is entire VG
:returns: The size string passed to the lvcreate command
"""
if not self.supports_thin_provisioning(self._root_helper):
LOG.error(_LE('Requested to setup thin provisioning, '
'however current LVM version does not '
'support it.'))
return None
if name is None:
name = '%s-pool' % self.vg_name
vg_pool_name = '%s/%s' % (self.vg_name, name)
if not size_str:
size_str = self._calculate_thin_pool_size()
cmd = ['lvcreate', '-T', '-L', size_str, vg_pool_name]
LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of "
"total %(free)sg", {'pool': vg_pool_name,
'size': size_str,
'free': self.vg_free_space})
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
self.vg_thin_pool = name
return size_str
def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
"""Creates a logical volume on the object's VG.
:param name: Name to use when creating Logical Volume
:param size_str: Size to use when creating Logical Volume
:param lv_type: Type of Volume (default or thin)
:param mirror_count: Use LVM mirroring with specified count
"""
if lv_type == 'thin':
pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
else:
cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]
if mirror_count > 0:
cmd.extend(['-m', mirror_count, '--nosync',
'--mirrorlog', 'mirrored'])
terras = int(size_str[:-1]) / 1024.0
if terras >= 1.5:
rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
# NOTE(vish): Next power of two for region size. See:
# http://red.ht/U2BPOD
cmd.extend(['-R', str(rsize)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating Volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
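    # Illustrative arithmetic (not part of the original module): for a
    # mirrored volume with an assumed size_str of '3072g',
    # terras = 3072 / 1024.0 = 3.0, which is >= 1.5, so
    # rsize = 2 ** ceil(log(3.0) / log(2)) = 4 and '-R 4' is appended
    # to the lvcreate command.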
@utils.retry(putils.ProcessExecutionError)
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
"""Creates a snapshot of a logical volume.
:param name: Name to assign to new snapshot
:param source_lv_name: Name of Logical Volume to snapshot
:param lv_type: Type of LV (default or thin)
"""
source_lvref = self.get_volume(source_lv_name)
if source_lvref is None:
LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
source_lv_name)
raise exception.VolumeDeviceNotFound(device=source_lv_name)
cmd = ['lvcreate', '--name', name,
'--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
if lv_type != 'thin':
size = source_lvref['size']
cmd.extend(['-L', '%sg' % (size)])
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error creating snapshot'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def _mangle_lv_name(self, name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not name.startswith('snapshot'):
return name
return '_' + name
def activate_lv(self, name, is_snapshot=False):
"""Ensure that logical volume/snapshot logical volume is activated.
:param name: Name of LV to activate
:raises: putils.ProcessExecutionError
"""
# This is a no-op if requested for a snapshot on a version
# of LVM that doesn't support snapshot activation.
# (Assume snapshot LV is always active.)
if is_snapshot and not self.supports_snapshot_lv_activation:
return
lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
# Must pass --yes to activate both the snap LV and its origin LV.
# Otherwise lvchange asks if you would like to do this interactively,
# and fails.
cmd = ['lvchange', '-a', 'y', '--yes']
if self.supports_lvchange_ignoreskipactivation:
cmd.append('-K')
cmd.append(lv_path)
try:
self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error activating LV'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def delete(self, name):
"""Delete logical volume or snapshot.
:param name: Name of LV to delete
"""
def run_udevadm_settle():
self._execute('udevadm', 'settle',
root_helper=self._root_helper, run_as_root=True,
check_exit_code=False)
# LV removal seems to be a race with other writers or udev in
# some cases (see LP #1270192), so we enable retry deactivation
LVM_CONFIG = 'activation { retry_deactivation = 1} '
try:
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.debug('Error reported running lvremove: CMD: %(command)s, '
'RESPONSE: %(response)s',
{'command': err.cmd, 'response': err.stderr})
LOG.debug('Attempting udev settle and retry of lvremove...')
run_udevadm_settle()
# The previous failing lvremove -f might leave behind
# suspended devices; when lvmetad is not available, any
# further lvm command will block forever.
# Therefore we need to skip suspended devices on retry.
LVM_CONFIG += 'devices { ignore_suspended_devices = 1}'
self._execute(
'lvremove',
'--config', LVM_CONFIG,
'-f',
'%s/%s' % (self.vg_name, name),
root_helper=self._root_helper, run_as_root=True)
LOG.debug('Successfully deleted volume: %s after '
'udev settle.', name)
def revert(self, snapshot_name):
"""Revert an LV from snapshot.
:param snapshot_name: Name of snapshot to revert
"""
self._execute('lvconvert', '--merge',
snapshot_name, root_helper=self._root_helper,
run_as_root=True)
def lv_has_snapshot(self, name):
cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o',
'Attr', '%s/%s' % (self.vg_name, name)]
out, _err = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
if out:
out = out.strip()
if (out[0] == 'o') or (out[0] == 'O'):
return True
return False
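    # Illustrative lvdisplay Attr strings (hypothetical, not captured output):
    #   'owi-aos---' -> first char 'o' marks a snapshot origin -> True
    #   '-wi-ao----' -> ordinary LV with no snapshots          -> False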
def extend_volume(self, lv_name, new_size):
"""Extend the size of an existing volume."""
try:
self._execute('lvextend', '-L', new_size,
'%s/%s' % (self.vg_name, lv_name),
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error extending Volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
def vg_mirror_free_space(self, mirror_count):
free_capacity = 0.0
disks = []
for pv in self.pv_list:
disks.append(float(pv['available']))
while True:
disks = sorted([a for a in disks if a > 0.0], reverse=True)
if len(disks) <= mirror_count:
break
# consume the smallest disk
disk = disks[-1]
disks = disks[:-1]
# match extents for each mirror on the largest disks
for index in list(range(mirror_count)):
disks[index] -= disk
free_capacity += disk
return free_capacity
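    # Worked example (illustrative): mirror_count=1 with PV free space
    # [10, 6, 4]. Pass 1 consumes 4 against the largest disk -> [6, 6],
    # free 4; pass 2 consumes 6 against the remaining disk -> [0], free 10.
    # With one mirror copy per extent, 20 units of raw space yield 10.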
def vg_mirror_size(self, mirror_count):
return (self.vg_free_space / (mirror_count + 1))
def rename_volume(self, lv_name, new_name):
"""Change the name of an existing volume."""
try:
self._execute('lvrename', self.vg_name, lv_name, new_name,
root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError as err:
LOG.exception(_LE('Error renaming logical volume'))
LOG.error(_LE('Cmd :%s'), err.cmd)
LOG.error(_LE('StdOut :%s'), err.stdout)
LOG.error(_LE('StdErr :%s'), err.stderr)
raise
| yanheven/cinder | cinder/brick/local_dev/lvm.py | Python | apache-2.0 | 28,909 |
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DataDao(object):
def __init__(self, spark_context, config):
self.spark_context = spark_context
self.zookeepers_uri = config.ZOOKEEPERS
self.device_measurement_table_name = config.DEVICE_MEASUREMENT_TABLE_NAME
def get_data_from_hbase(self, account_id, component_id, start_ts, stop_ts):
print("get_data_for_device", account_id, component_id, start_ts, stop_ts)
start = account_id + '\0' + component_id + '\0' + start_ts
stop = account_id + '\0' + component_id + '\0' + stop_ts
print("START: ", start.replace('\0', '\\0'))
print("STOP: ", stop.replace('\0', '\\0'))
# see https://hbase.apache.org/0.94/xref/org/apache/hadoop/hbase/mapreduce/TableInputFormat.html
conf = {
"hbase.zookeeper.quorum": self.zookeepers_uri,
"hbase.mapreduce.inputtable": self.device_measurement_table_name,
"hbase.mapreduce.scan.row.start": str(start),
"hbase.mapreduce.scan.row.stop": str(stop),
"hbase.mapreduce.scan.columns": "data:measure_val"
}
key_conv = "org.apache.spark.examples.pythonconverters.ImmutableBytesWritableToStringConverter"
value_conv = "org.apache.spark.examples.pythonconverters.HBaseResultToStringConverter"
rdd = self.spark_context.newAPIHadoopRDD("org.apache.hadoop.hbase.mapreduce.TableInputFormat",
"org.apache.hadoop.hbase.io.ImmutableBytesWritable",
"org.apache.hadoop.hbase.client.Result",
conf=conf, keyConverter=key_conv, valueConverter=value_conv)
return rdd
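# A minimal usage sketch (hypothetical config object and IDs; needs a live
# Spark/HBase/ZooKeeper stack, so it is left as comments rather than code):
#   from pyspark import SparkContext
#   sc = SparkContext(appName="rule-engine")
#   dao = DataDao(sc, config)  # config supplies ZOOKEEPERS and the table name
#   rdd = dao.get_data_from_hbase("acc-1", "comp-1", "1420070400", "1420074000")
#   print(rdd.count())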
| enableiot/iotanalytics-rule-engine | pydeps/db/dataDao.py | Python | apache-2.0 | 2,301 |
# -*- coding: utf-8 -*-
import re
import unicodedata
def lower_text(text):
return text.lower()
def normalize_unicode(text, form='NFKC'):
normalized_text = unicodedata.normalize(form, text)
return normalized_text
def normalize_number(text):
"""
pattern = r'\d+'
replacer = re.compile(pattern)
result = replacer.sub('0', text)
"""
    # Replace consecutive digits with a single '0'
replaced_text = re.sub(r'\d+', '0', text)
return replaced_text
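# A short, illustrative self-check of the helpers above (not part of the
# original module; the sample strings are assumptions):
if __name__ == '__main__':
    assert lower_text(u'Hello World') == u'hello world'
    assert normalize_unicode(u'Ａ①') == u'A1'  # NFKC folds full-width forms
    assert normalize_number(u'tel: 03-1234-5678') == u'tel: 0-0-0'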
| chakki-works/elephant_sense | scripts/data/normalization.py | Python | apache-2.0 | 477 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of compono released under the Apache 2 license.
# See the NOTICE for more information.
from distutils.command.install_data import install_data
import os
import sys
if not hasattr(sys, 'version_info') or sys.version_info < (2, 5, 0, 'final'):
raise SystemExit("Compono requires Python 2.5 or later.")
from setuptools import setup, find_packages
from mtcompono import __version__
data_files = []
for root in ('compono/_design', 'compono/media', 'compono/templates'):
for dir, dirs, files in os.walk(root):
dirs[:] = [x for x in dirs if not x.startswith('.')]
files = [x for x in files if not x.startswith('.')]
data_files.append((os.path.join('compono', dir),
[os.path.join(dir, file_) for file_ in files]))
class install_package_data(install_data):
def finalize_options(self):
self.set_undefined_options('install',
('install_lib', 'install_dir'))
install_data.finalize_options(self)
cmdclass = {'install_data': install_package_data }
setup(
name = 'mtcompono',
version = __version__,
    description = 'Minimalist Django CMS',
long_description = file(
os.path.join(
os.path.dirname(__file__),
'README.rst'
)
).read(),
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.org',
license = 'BSD',
url = 'http://github.com/benoitc/mt-compono',
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
zip_safe = False,
packages = find_packages(),
include_package_data = True,
    data_files = data_files,
cmdclass=cmdclass,
install_requires = [
'setuptools>=0.6b1'
],
requires = [
'django (>1.1.0)',
'couchdbkit (>=0.4.2)',
'simplejson (>=2.0.9)',
],
test_suite = 'nose.collector',
)
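# Typical invocation (assumed, not documented in this file):
#   python setup.py build
#   python setup.py install
# The install_package_data subclass above redirects data files into the
# package directory (install_lib) instead of the default data prefix.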
| benoitc/mt-compono | setup.py | Python | apache-2.0 | 2,399 |
#!/usr/bin/env python2
'''
Copyright (c) 2012 Peter Andrews
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
#import system libraries
from __future__ import division
from gi.repository import Gtk, GooCanvas, Gdk
import networkx as nx
import xml.etree.ElementTree as et
from time import time
from os.path import basename, dirname
#import local libraries
import Errors
import Graph
import Drawing
from ETree import sub_element as sub
from undobuffer import UndoableBuffer as UBuff
class Sociogram(object):
def __init__(self):
'''Set up internals and instantiate/fix up GUI using Gtk.Builder.'''
self.version = "0.1"
self.G = Graph.Sociograph() # instantiate the graph for storage and positioning
#placeholders for selecting objects
self.selection = None
self.seltype = None
self.seldata = None
self.highlight_dist = 1
self.highlight = False
self.savepath = None
self.lastsave = time()
self.dirty = False
self.title = "Untitled Diagram - Sociogram"
self.doc_title = ""
self.doc_desc = ""
self.builder = Gtk.Builder()
self.builder.add_from_file("ui/sociogram.ui")
#set default type for new objects
self.builder.get_object("newtypesel").set_active(0)
self.node_lbl_store = Gtk.ListStore(str)
completions = []
for t in range(5):
x = Gtk.EntryCompletion()
x.set_model(self.node_lbl_store)
x.set_text_column(0)
x.set_minimum_key_length(1)
x.set_inline_completion(True)
x.set_popup_single_match(False)
completions.append(x)
#populate from_combo, to_combo, attr_edit_name with liststore and renderer
from_combo = self.builder.get_object("from_combo")
from_combo.set_model(self.node_lbl_store)
self.from_main = self.builder.get_object("from_combo_entry")
self.from_main.set_completion(completions[0])
to_combo = self.builder.get_object("to_combo")
to_combo.set_model(self.node_lbl_store)
self.to_main = self.builder.get_object("to_combo_entry")
self.to_main.set_completion(completions[1])
#populate from_combo_dlg and to_combo_dlg from the same model as above
from_combo_dlg = self.builder.get_object("from_combo_dlg")
from_combo_dlg.set_model(self.node_lbl_store)
self.from_dlg = self.builder.get_object("from_combo_dlg_entry")
self.from_dlg.set_completion(completions[2])
to_combo_dlg = self.builder.get_object("to_combo_dlg")
to_combo_dlg.set_model(self.node_lbl_store)
self.to_dlg = self.builder.get_object("to_combo_dlg_entry")
self.to_dlg.set_completion(completions[3])
#add completion to toolbar node search field
searchbar = self.builder.get_object("search_entry")
searchbar.set_completion(completions[4])
#connect attribute view with attribute list, create columns, and make it all sortable
editname = Gtk.CellRendererText()
editname.set_property("editable", True)
editname.connect("edited", self.update_attrs, 0)
editval = Gtk.CellRendererText()
editval.set_property("editable", True)
editval.connect("edited", self.update_attrs, 1)
self.attr_store = Gtk.ListStore(str, str, bool, str)
self.attr_store.set_sort_column_id(0, Gtk.SortType.ASCENDING)
self.attr_disp = self.builder.get_object("attrstree")
self.attr_disp.set_model(self.attr_store)
self.namecol = Gtk.TreeViewColumn("Name", editname, text=0)
self.namecol.set_sort_column_id(0)
self.namecol.set_expand(True)
col2 = Gtk.TreeViewColumn("Value", editval, text=1)
col2.set_sort_column_id(1)
col2.set_expand(True)
togglecell = Gtk.CellRendererToggle()
togglecell.connect("toggled", self.update_attrs, None, 2)
col3 = Gtk.TreeViewColumn("Visible", togglecell, active=2)
col3.set_sort_column_id(2)
self.attr_disp.append_column(self.namecol)
self.attr_disp.append_column(col2)
self.attr_disp.append_column(col3)
self.rel_store = Gtk.ListStore(str, str)
rel_combo = self.builder.get_object("rel_combo")
rel_combo.set_model(self.rel_store)
cell = Gtk.CellRendererText()
rel_combo.pack_start(cell, True)
rel_combo.add_attribute(cell, 'text', 0)
#create canvas object and add to the scroll window
#VERY IMPORTANT. using the normal window.add() call fails, but setting the parent like this makes everything fine
self.canvas = Drawing.Canvas(parent=self.builder.get_object("canvas_scroll"), has_tooltip=True, background_color="white")
#attach callbacks
self.canvas.node_callback = self.node_clicked
self.canvas.line_callback = self.line_clicked
self.canvas.key_handler = self.canvas_key_handler
self.canvas.connect("button-press-event", self.canvas_clicked)
self.canvas.connect("scroll-event", self.scroll_handler)
self.canvas.mouseover_callback = self.update_pointer
#TODO once the prefs dialog is implemented, this should be moved to a separate default style update function
#populate our default styling
sheet = self.canvas.edge_default_stylesheet
sheet.stroke_color = 0x000000ff
sheet.set_fontdesc('sans normal 11')
sheet.sel_color = 0xff0000ff
sheet.sel_width = 1
sheet.text_color = 0x000000ff
sheet.set_fontdesc('sans normal 11')
sheet = self.canvas.vertex_default_stylesheet
sheet.fill_color = 0xffff00ff
sheet.stroke_color = 0x000000ff
sheet.sel_color = 0x000000ff
sheet.text_color = 0x000000ff
sheet.set_fontdesc('sans normal 11')
#create file type filters
supported_extensions = {"XML Files":"*.xml", "Sociogram Files":"*.soc", "All Files":"*.*"}
self.save_dlg = self.builder.get_object("save_dlg")
self.open_dlg = self.builder.get_object("open_dlg")
for name, ext in supported_extensions.iteritems():
fil = Gtk.FileFilter()
fil.set_name(name)
fil.add_pattern("*.xml")
self.save_dlg.add_filter(fil)
self.open_dlg.add_filter(fil)
#create undoable text buffers
self.notes_buff = UBuff()
self.notes_buff.connect('insert-text', self.notes_check_undo)
self.notes_buff.connect('delete-range', self.notes_check_undo)
self.desc_buff = UBuff()
self.desc_buff.connect('insert-text', self.desc_check_undo)
self.desc_buff.connect('delete-range', self.desc_check_undo)
self.builder.get_object("notes_view").set_buffer(self.notes_buff)
self.builder.get_object("docdesc_view").set_buffer(self.desc_buff)
# Declare references for all the dialogs and popups we need. We do keep
# the builder around, so this is mostly for code readability.
# TODO instantiate reference to everything we care about, so that the
# expensive builder can be nixed.
self.not_implemented_box = self.builder.get_object("not_implemented_err")
self.about_dlg = self.builder.get_object("about_dlg")
self.add_item_dlg = self.builder.get_object("add_item_dlg")
self.dup_err_dlg = self.builder.get_object("dup_err_dlg")
self.export_dlg = self.builder.get_object("export_dlg")
self.node_style_dlg = self.builder.get_object("node_style_popup")
self.paste_warn_dlg = self.builder.get_object("paste_warning_dlg")
self.prefs_dlg = self.builder.get_object("prefs_dlg")
self.rel_style_dlg = self.builder.get_object("rel_style_popup")
self.style_dlg = self.builder.get_object("style_dlg")
self.blank_err_dlg = self.builder.get_object("blank_err_dlg")
self.find_dlg = self.builder.get_object("find_dlg")
self.save_warning = self.builder.get_object("savewarn_dlg")
self.save_close_warning = self.builder.get_object("savewarn_close_dlg")
self.savebtn = self.builder.get_object("savefilebtn")
self.savemenu = self.builder.get_object("menu_save")
self.docprops_dlg = self.builder.get_object("docprops_dlg")
self.hscroll = self.builder.get_object("horiz_scroll_adj")
self.vscroll = self.builder.get_object("vertical_scroll_adj")
self.scale_adj = self.builder.get_object("scale_adj")
self.load_err_dlg = self.builder.get_object("load_err_dlg")
self.settings_warning = self.builder.get_object("load_settings_warning")
self.notes_view = self.builder.get_object("notes_view")
self.notes_undo_btn = self.builder.get_object("notes_undo")
self.notes_redo_btn = self.builder.get_object("notes_redo")
self.desc_view = self.builder.get_object("desc_view")
self.desc_undo_btn = self.builder.get_object("desc_undo")
self.desc_redo_btn = self.builder.get_object("desc_redo")
#set our version string
self.builder.get_object("about_dlg").set_version(self.version)
#initialize our fullscreen tracker
        self.is_fullscreen = False
#show the main window
self.window = self.builder.get_object("sociogram_main")
self.window.set_title(self.title)
self.window.show_all()
# Attach handlers to signals described in the .ui file.
# TODO once functionality is finalized, remove redundant signals
handlers_main = {
"app.quit": self.do_quit,
"app.newfile": self.make_new,
"app.openfile": self.openfile,
"app.savefile": self.save,
"app.saveas": self.save_new,
"app.do_export": self.show_dev_error,
"app.do_find": self.show_dev_error,
"app.help": self.show_dev_error,
"app.undo": self.undo_picker,
"app.redo": self.redo_picker,
"app.search": self.find_node,
"app.reset_search_icon": self.set_search_icon,
"app.toggle_widget": self.toggle_widget,
"app.toggle_fs": self.toggle_fullscreen,
"app.track_fs": self.track_fullscreen,
"app.hide_add_controls": self.hide_addbox_controls,
"app.deep_find": self.show_dev_error,
"app.show_dlg": self.show_dlg,
"app.show_add": self.show_add,
"app.dlg_sanity": self.check_new_dlg_sanity,
"app.set_highlight_radius": self.set_highlight_dist,
"app.check_name": self.check_label,
"app.canvas_keys": self.canvas_key_handler,
"app.zoom_in": self.zoom_in_step,
"app.zoom_out": self.zoom_out_step,
"app.zoom_reset": self.zoom_reset,
"app.zoom_fit": self.zoom_fit,
"app.cancel_newname": self.cancel_name_edit,
"app.zoom_changed": self.update_zoom,
"app.check_endpoint": self.check_endpoint,
"app.cancel_endpoint": self.cancel_endpoint,
"app.set_selrel": self.pick_rel,
"app.show_docprops": self.edit_docprops,
"app.notes_undo": self.notes_undo,
"app.notes_redo": self.notes_redo,
"app.desc_undo": self.desc_undo,
"app.desc_redo": self.desc_redo,
"data.add": self.show_dev_error,
"data.copyattrs": self.show_dev_error,
"data.pasteattrs": self.do_paste,
"data.delsel": self.delete_selection,
"data.update_lbl": self.update_lbl,
"data.update_origin": self.update_origin,
"data.update_dest": self.update_dest,
"data.update_weight": self.update_weight,
"data.update_bidir": self.update_bidir,
"data.update_notes": self.update_notes,
"data.newattr": self.add_attr,
"data.delattr": self.del_attr,
"data.updateattr": self.show_dev_error,
"graph.toggle_highlight": self.toggle_highlight,
"graph.refresh": self.redraw
}
self.builder.connect_signals(handlers_main)
#disable sidebar programmatically, so that labels will be drawn correctly
self.builder.get_object("sidebarbox").set_sensitive(False)
def nothing(self, a=None, b=None, c=None):
print 'nothing'
def undo_picker(self, widget, data=None):
'''Event handler and standalone. Picks which undo function to trigger based on the focused UI element.'''
focus = self.window.get_focus()
if focus is self.notes_view:
self.notes_undo()
elif focus is self.desc_view:
self.desc_undo()
else:
self.undo()
pass
def redo_picker(self, widget, data=None):
'''Event handler and standalone. Picks which redo function to trigger based on the focused UI element.'''
focus = self.window.get_focus()
if focus is self.notes_view:
self.notes_redo()
elif focus is self.desc_view:
self.desc_redo()
else:
self.redo()
pass
def notes_undo(self, widget=None, data=None):
'''Event handler and standalone. Undo notes edit action.'''
self.notes_buff.undo()
def notes_redo(self, widget=None, data=None):
'''Event handler and standalone. Redo notes edit action.'''
self.notes_buff.redo()
def notes_check_undo(self, widget=None, data=None, a=None, b=None):
'''Event handler. Set undo/redo availability according to widget properties.'''
self.notes_undo_btn.set_sensitive(self.notes_buff.can_undo)
self.notes_redo_btn.set_sensitive(self.notes_buff.can_redo)
def desc_undo(self, widget, data=None):
'''Event handler. Undo description edit action.'''
self.desc_buff.undo()
def desc_redo(self, widget, data=None):
'''Event handler. Redo description edit action.'''
self.desc_buff.redo()
def desc_check_undo(self, widget=None, data=None, a=None, b=None):
'''Event handler. Set undo/redo availability according to widget properties.'''
self.desc_undo_btn.set_sensitive(self.desc_buff.can_undo)
self.desc_redo_btn.set_sensitive(self.desc_buff.can_redo)
def undo(self, widget=None, data=None):
'''Event handler and standalone. Global (non-text field) undo function.'''
pass
def redo(self, widget=None, data=None):
'''Event handler and standalone. Global (non-text field) redo function.'''
pass
def edit_docprops(self, widget=None, data=None):
'''Event handler. Edit document title and description.'''
ret = self.docprops_dlg.run()
self.docprops_dlg.hide()
if ret == 5:
#store title and description
self.doc_title = self.builder.get_object("doctitle_entry").get_text()
start = self.desc_buff.get_iter_at_offset(0)
end = self.desc_buff.get_iter_at_offset(-1)
self.doc_desc = self.desc_buff.get_text(start, end, False)
self.set_dirty(True)
#TODO show status message about updated title/desc
def do_quit(self, widget=None, data=None):
'''Handle quitting.'''
self.confirm_discard(closing=True)
Gtk.main_quit()
def update_title(self):
'''Update our window's title.'''
if self.savepath == None:
self.title = "Untitled Diagram - Sociogram"
else:
#title appearance: basename (path) - Sociogram
self.title = "%s (%s) - Sociogram" % (basename(self.savepath), dirname(self.savepath))
self.window.set_title(self.title)
def set_doc_title(self, title):
'''Set document title text. This is not used in the window title.'''
if title is None:
title = ""
self.doc_title = title
self.builder.get_object("doctitle_entry").set_text(title)
def set_doc_desc(self, desc):
'''Set document description.'''
if desc is None:
desc = ""
self.doc_desc = desc
self.desc_buff.set_text(desc)
self.desc_buff.clear_undo()
self.desc_check_undo()
def set_dirty(self, val):
'''Mark the current file as "dirty", indicating unsaved changes.'''
if val == self.dirty: return
self.dirty = val
if not self.dirty:
self.lastsave = time()
self.window.set_title(self.title)
else:
self.window.set_title("*"+self.title)
def confirm_discard(self, closing=False):
'''Prompt the user to save unfinished changes, if necessary.
Returns True if it's OK to continue, and False if not.'''
if self.dirty:
#warn about closing current document
dlg = self.save_close_warning if closing else self.save_warning
if self.savepath == None:
dlg.set_markup(_("Save your changes before closing?"))
else:
dlg.set_markup(_("Save your changes to %s before closing?") %basename(self.savepath))
period = int((time() - self.lastsave)/60)
dlg.format_secondary_text(_("If you don't save, changes from the last %s minutes will be lost.") %period)
response = dlg.run()
dlg.hide()
if response == 5:
self.save() #save it and continue
elif response != 2:
return False #cancel and prevent any further action
#either we've saved or been told not to, so clear the calling function to go ahead
return True
def make_new(self, widget=None, data=None):
'''Event handler and standalone. Wipe the current data and load defaults.'''
if not self.confirm_discard(): return
self.clear_select()
self.rel_store.clear()
self.attr_store.clear()
self.node_lbl_store.clear()
self.savepath = None
self.set_dirty(False)
self.update_title()
self.set_doc_title(None)
self.set_doc_desc(None)
self.zoom_reset()
self.G.clear()
self.redraw()
def openfile(self, widget=None, data=None):
'''Event handler and standalone. Pick a file and load from it.'''
if not self.confirm_discard(): return
open_dlg = self.builder.get_object("open_dlg")
if self.savepath != None:
open_dlg.set_filename(self.savepath)
response = open_dlg.run()
open_dlg.hide()
if response == 5:
#clear existing data
self.set_dirty(False) #prevent another prompt
self.make_new()
self.savepath = open_dlg.get_filename()
self.update_title()
try:
tree = et.parse(self.savepath)
            except et.ParseError:
self.load_err_dlg.run()
self.load_err_dlg.hide()
self.make_new()
return
root = tree.getroot()
err = None
if self.version != root.get('version'):
#TODO figure out how to compare save format versions
#err = "version"
pass
try:
#import doc title and description
title = root.find('title').text
desc = root.find('description').text
self.set_doc_title(title)
self.set_doc_desc(desc)
#import document-specific settings
settings = root.find('settings')
scale = settings.find('scale').text
self.scale_adj.set_value(float(scale))
sortprefs = settings.find('attrsort')
sortdir = Gtk.SortType.ASCENDING if sortprefs.get('direction') == "asc" else Gtk.SortType.DESCENDING
sortcol = int(sortprefs.text)
self.attr_store.set_sort_column_id(sortcol, sortdir)
except AttributeError:
err = "settings"
try:
#import document data
data = root.find('data')
for node in data.iter('node'):
#add node
uid = node.find('uid').text
label = node.find('label').text
notes = node.find('notes').text
#construct attributes list
attrs = []
for a in node.iter('attr'):
name = a.find('name').text
val = a.find('value').text
vis = True if a.find('visible').text == "True" else False
u = a.find('uid').text
attrs.append((name, val, vis, u))
self._add_node(label, uid=uid, attrs=attrs, notes=notes)
for edge in data.iter('rel'):
#add edge
uid = edge.find('uid').text
label = edge.find('label').text
notes = edge.find('notes').text
#construct attributes list
attrs = []
for a in edge.iter('attr'):
name = a.find('name').text
val = a.find('value').text
vis = True if a.find('visible').text == "True" else False
u = a.find('uid').text
attrs.append((name, val, vis, u))
origin = edge.find('origin').text
dest = edge.find('dest').text
mutual = True if edge.find('mutual').text == "True" else False
weight = int(float(edge.find('weight').text))
self._add_rel(label, origin, dest, weight, mutual, attrs=attrs, uid=uid, notes=notes)
except AttributeError:
err = "all"
if err is not None:
if err is "all":
self.load_err_dlg.run()
self.load_err_dlg.hide()
self.make_new()
return
if err is "settings":
self.settings_warning.run()
self.settings_warning.hide()
self.redraw()
self.builder.get_object("canvas_eventbox").grab_focus()
#TODO send "opened" message through status bar
def save(self, widget=None, data=None):
'''Event handler and standalone. Save to known path.'''
if self.savepath == None:
self.save_new()
return
if not self.dirty: return
#construct XML
#create base element and record program version
root = et.Element('sociogram', attrib={'version':self.version})
#create title and description
sub(root, 'title', self.doc_title)
sub(root, 'description', self.doc_desc)
#create settings
settings = sub(root, 'settings')
sub(settings, 'scale', self.scale_adj.get_value())
sortcol, sortdir = self.attr_store.get_sort_column_id()
sortdir = "asc" if sortdir == Gtk.SortType.ASCENDING else 'desc'
sortset = sub(settings, 'attrsort', sortcol)
sortset.set('direction', sortdir)
#create data holder
data = sub(root, 'data')
#create nodes
for node in self.G.nodes_iter():
n = self.G.node[node]['node']
node = sub(data, 'node')
#store uid, label, notes, attributes
sub(node, 'uid', n.uid)
sub(node, 'label', n.label)
sub(node, 'notes', n.notes)
for aid, aval in n.attributes.iteritems():
attr = sub(node, 'attr')
sub(attr, 'uid', aid)
sub(attr, 'name', aval['name'])
sub(attr, 'value', aval['value'])
sub(attr, 'visible', aval['visible'])
for f, t in self.G.edges_iter():
for e in self.G[f][t]['rels']:
edge = sub(data, 'rel')
#store uid, label, notes, attributes, origin, destination, weight, mutual
sub(edge, 'uid', e.uid)
sub(edge, 'label', e.label)
sub(edge, 'notes', e.notes)
for aid, aval in e.attributes.iteritems():
attr = sub(edge, 'attr')
sub(attr, 'uid', aid)
sub(attr, 'name', aval['name'])
sub(attr, 'value', aval['value'])
sub(attr, 'visible', aval['visible'])
sub(edge, 'origin', e.from_node)
sub(edge, 'dest', e.to_node)
sub(edge, 'weight', e.weight)
sub(edge, 'mutual', e.mutual)
#write xml to self.savepath
tree = et.ElementTree(element=root)
tree.write(self.savepath, encoding="UTF-8")
self.set_dirty(False)
#TODO send "saved" message through status bar
def save_new(self, widget=None, data=None):
'''Event handler and standalone. Pick save location, then save.'''
save_dlg = self.builder.get_object("save_dlg")
if self.savepath != None:
save_dlg.set_filename(self.savepath)
else:
save_dlg.set_current_name("sociogram.xml")
save_dlg.run()
save_dlg.hide()
uri = save_dlg.get_filename()
if uri != None:
self.savepath = uri
self.update_title()
self.set_dirty(True)
self.save()
def update_pointer(self, widget, data=None, extra=None, hand=None):
'''Event handler to pick the pointer used on the graph.'''
if hand:
cursor = Gdk.Cursor(Gdk.CursorType.HAND1)
else:
cursor = None
rwin = self.builder.get_object("canvas_eventbox").get_window()
rwin.set_cursor(cursor)
def set_highlight_dist(self, widget, data=None):
'''Event handler. Update our internal highlight distance.'''
self.highlight_dist = widget.get_value()
self._do_highlight()
def toggle_highlight(self, widget, data=None):
'''Event handler. Turn highlight mode on or off.'''
self.highlight = widget.get_active()
self.builder.get_object("highlight_btn").set_active(self.highlight)
self.builder.get_object("menu_highlight").set_active(self.highlight)
self._do_highlight()
def _do_highlight(self):
'''Set params for the special "highlight" draw mode and draw it.'''
if not (self.highlight and self.seltype == 'node'):
return
#TODO
# iterate out from the selected node
# mark/store those nodes and paths
# trigger a special canvas.freshen operation
def add_attr(self, widget, data=None):
'''Event handler. Adds an attribute to the current selection.'''
if self.selection == None:
return
#add to underlying Node object
uid = self.seldata.add_attr(("attribute", "value", False))
#add to the store
newrow = self.attr_store.append(("attribute", "value", False, uid))
path = self.attr_store.get_path(newrow)
#start editing right away
self.attr_disp.set_cursor(path, self.namecol, True)
self.set_dirty(True)
def next_field(self, widget, data=None):
pass
def del_attr(self, widget, data=None):
'''Event handler. Removes the currently highlighted attribute from the current selection.'''
tree_selection = self.attr_disp.get_selection()
if self.selection == None or tree_selection == None:
return
selrow = tree_selection.get_selected()[1]
auid = self.attr_store.get_value(selrow, 3)
#remove from underlying Node object
self.seldata.del_attr(auid)
#now from the store
self.attr_store.remove(selrow)
self.set_dirty(True)
def update_attrs(self, widget, path=None, text=None, col=None):
'''Event handler. Change name or value of currently selected attribute.'''
tree_selection = self.attr_disp.get_selection()
if self.selection == None or tree_selection == None:
return
#get the attribute ID
#selrow = tree_selection.get_selected()[1]
auid = self.attr_store[path][3]
#update attribute
attr = self.seldata.attributes[auid]
if col==0:
attr['name'] = text
elif col==1:
attr['value'] = text
elif col==2:
attr['visible'] = not attr['visible']
#update store
#once we do this, the original path is no longer valid
if text != None:
self.attr_store[path][col] = text
else:
self.attr_store[path][col] = attr['visible']
#refresh only, no need for a complete redraw
self.refresh(self.seldata)
self.set_dirty(True)
def show_add(self, widget, data=None):
'''Show Add Object dialog after resetting field defaults and ensuring sane Relationship availability.'''
#show the thing
self.builder.get_object("new_type_box").set_sensitive(self.G.order() > 1) #disallow Relationships unless we have enough nodes
self.builder.get_object("name_entry_dlg").grab_focus() #focus on the label field
response = self.add_item_dlg.run()
self.add_item_dlg.hide()
#only add things if the OK button was pressed
if response == 4:
#add a new object
obj_type = self.builder.get_object("newtypesel").get_active_text()
lbl = self.builder.get_object("name_entry_dlg").get_text()
#get the rest of our data
paste = self.builder.get_object("use_copied_attrs").get_active()
if "Rel" in obj_type:
#grab extra data fields
fnode = self.from_dlg.get_text()
tnode = self.to_dlg.get_text()
weight = self.builder.get_object("weight_spin_dlg").get_value()
bidir = self.builder.get_object("bidir_new").get_active()
rel, new_edge = self._add_rel(lbl, fnode, tnode, weight, bidir, paste)
#just do a refresh when possible
do_refresh = not new_edge
else:
                do_refresh = False #adding a node always requires a full redraw
node = self._add_node(lbl, paste)
# Since we can never refresh on node creation, we can ignore
# nodes completely.
if do_refresh:
#select the appropriate edge
obj = self.canvas.get_edge(fnode, tnode)
self.refresh(rel)
self.set_selection(obj, srel=rel)
else:
#this is the normal case
self.redraw()
if "Rel" in obj_type:
obj = self.canvas.get_edge(fnode, tnode) #get proper edge
else:
obj = self.canvas.get_vertex(lbl) #get proper node
self.set_selection(obj)
self.set_dirty(True)
#clear the dialog's values
self.builder.get_object("newtypesel").set_active(0)
self.builder.get_object("name_entry_dlg").set_text('')
self.from_dlg.set_text('')
self.to_dlg.set_text('')
self.builder.get_object("weight_spin_dlg").set_value(5)
self.builder.get_object("bidir_new").set_active(False)
self.builder.get_object("use_copied_attrs").set_active(False)
def _add_node(self, lbl, paste=False, attrs=None, uid=None, notes=None):
'''Internal function. Add a node and handle bookkeeping.'''
#make sure the node doesn't already exist
if lbl in self.G:
self.show_dup_node_error()
return
#create object and update data
node = Graph.Node(lbl, attrs=attrs, uid=uid, notes=notes)
self.G.add_node(lbl, {"node": node}) #add to graph
self.node_lbl_store.append([lbl]) #update name list for the dropdowns
if paste:
self._paste_attrs(node)
def _add_rel(self, lbl, fname, tname, weight, bidir, paste=False, attrs=None, uid=None, notes=None):
'''Internal function. Add a relationship and handle bookkeeping.'''
#make sure both nodes exist
if fname not in self.G:
raise Errors.MissingNode("Node %s not in graph." % fname)
if tname not in self.G:
raise Errors.MissingNode("Node %s not in graph." % tname)
#create relationship object
rel = Graph.Relationship(lbl, fname, tname, weight, bidir, attrs=attrs, uid=uid, notes=notes)
new_edge = self.G.add_rel(rel)
if paste:
self._paste_attrs(rel)
return (rel, new_edge)
def hide_addbox_controls(self, widget, data=None):
'''Event handler. Toggles visibility of Relationship-specific fields in the Add Object dialog based on selected Type.'''
obj_type = widget.get_active_text()
vis = "Rel" in obj_type
for wname in ["frombox_dlg", "tobox_dlg", "weightbox_dlg", "bidir_new"]:
self.builder.get_object(wname).set_visible(vis)
self.builder.get_object("name_entry_dlg").grab_focus()
def check_new_dlg_sanity(self, widget, data=None):
'''Event handler. Disable Add Object dialog's Add button unless inputs make sense.'''
haslbl = self.builder.get_object("name_entry_dlg").get_text() != ""
ftext = self.from_dlg.get_text()
ttext = self.to_dlg.get_text()
hasnodes = True
diffnodes = True
nodes_exist = True
obj_type = self.builder.get_object("newtypesel").get_active_text()
if "Rel" in obj_type:
hasnodes = ftext != '' and ttext != ''
diffnodes = ftext != ttext
nodes_exist = ftext in self.G and ttext in self.G
sense = haslbl and hasnodes and diffnodes and nodes_exist
self.builder.get_object("new_ok").set_sensitive(sense)
# Picks the appropriate object to paste into, then passes off to _paste_attrs.
def do_paste(self, widget, data=None):
'''Event handler. Trigger paste operation for selected item.'''
if self.selection == None: return
self._paste_attrs(self.selection)
self.set_dirty(True)
# Internal function to overwrite target object's attributes with those from
# the clipboard.
def _paste_attrs(self, obj):
'''Paste copied attributes into obj, overwriting if necessary.'''
#TODO paste
#silently fail if no paste buffer
self.show_dev_error()
def center_on(self, obj):
'''Center the graph on a specific drawn object.'''
xyr = obj.get_xyr()
x, y = self.canvas.convert_from_item_space(obj.parent, xyr['x'], xyr['y'])
#get the visible window dimensions
vis_w = self.hscroll.get_page_size()
vis_h = self.vscroll.get_page_size()
corner_x = x - vis_w/2
corner_y = y - vis_h/2
self.canvas.scroll_to(corner_x, corner_y)
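    # Illustrative numbers: with an 800x600 viewport, centering on an item at
    # canvas coordinates (1000, 500) scrolls to corner (1000-400, 500-300),
    # i.e. (600, 200).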
# Sees if a given node exists, and focuses on it if so.
# Slightly different definition so that data is big enough to accept random
# data from clicking the search icon.
def find_node(self, widget, *data):
'''Event handler. Search for the node whose label matches widget input.'''
node = widget.get_text()
if node not in self.G:
widget.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, Gtk.STOCK_NO)
widget.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("No such node"))
widget.set_icon_activatable(Gtk.EntryIconPosition.SECONDARY, False)
else:
#select node
vertex = self.canvas.get_vertex(node)
self.set_selection(vertex)
self.center_on(vertex)
widget.select_region(0, widget.get_text_length())
def set_search_icon(self, widget, data=None):
'''Event handler. Resets search box icon when user types.'''
widget.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, Gtk.STOCK_FIND)
widget.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Search"))
widget.set_icon_activatable(Gtk.EntryIconPosition.SECONDARY, True)
def set_selection(self, selobj, obj=None, event=None, srel=None):
'''Event handler and standalone. Mark selobj as selected and update ui.'''
#clear the old selection
self.clear_select()
if selobj == None:
return
self.selection = selobj
self.seltype = selobj.type
self.selection.set_selected(True)
#grab data and update UI
if self.seltype == 'node':
self.seldata = selobj.node
self.activate_node_controls()
elif self.seltype == 'edge':
#activate all edit controls
self.activate_rel_controls()
#automatically select the edge's most heavily weighted relationship
self.seldata = selobj.get_heaviest()
#populate self.rel_store from the edge
target = None if srel == None else srel.uid
active = 0
for idex, rel in enumerate(self.selection.rels):
self.rel_store.append((str(rel), str(rel.uid)))
if target == rel.uid:
active = idex
#select the target relationship
#if none was provided, this defaults to the topmost
self.builder.get_object("rel_combo").set_active(active)
self._refresh_edit_controls()
self.builder.get_object("canvas_eventbox").grab_focus() #set keyboard focus
def pick_rel(self, widget=None, data=None, relnum=None):
'''Event handler. Select a specific relationship from an edge.'''
if self.selection == None: return
relid = widget.get_active() if widget != None else relnum
rel = self.selection.rels[relid]
if rel == self.seldata: return
self.seldata = rel
self._refresh_edit_controls()
self.builder.get_object("canvas_eventbox").grab_focus() #set keyboard focus
def _refresh_edit_controls(self):
'''Update contents of edit box controls.'''
if self.seltype == 'edge':
#populate edit controls for that relationship
weight = self.seldata.weight
bidir = self.seldata.mutual
tlbl = self.seldata.to_node
flbl = self.seldata.from_node
self.to_main.set_text(tlbl)
self.from_main.set_text(flbl)
self.builder.get_object("weight_spin").set_value(weight)
self.builder.get_object("bidir").set_active(bidir)
#populate common fields
self.builder.get_object("name_entry").set_text(self.seldata.label)
self.notes_buff.set_text(self.seldata.notes)
self.notes_buff.clear_undo()
self.notes_check_undo()
#populate self.attr_store from selected graph object's attributes
self.attr_store.clear()
for uid in self.seldata.attributes.iterkeys():
attr = self.seldata.attributes[uid]
self.attr_store.append((attr['name'], attr['value'], attr['visible'], uid))
def clear_select(self, canvas=None, data=None):
'''Event handler and standalone. Deselect object(s).'''
if self.selection != None:
self.update_notes()
self.selection.set_selected(False)
self.selection = None
self.seltype = None
self.seldata = None
self.attr_store.clear()
self.rel_store.clear()
self.disable_all_controls()
def delete_selection(self, widget=None, data=None):
'''Event handler and standalone. Delete selected object.'''
if self.selection == None:
return
if self.seltype == 'node':
self.G.remove_node(self.seldata.label)
self.selection.remove()
self.clear_select()
self.redraw()
else:
killed_edge = self.G.remove_rel(self.seldata)
if not killed_edge:
#in this case, we only need to refresh the graph, not redraw it
sel = self.selection
self.refresh(self.seldata, "deleted")
self.clear_select()
self.set_selection(sel)
else:
self.selection.remove()
self.clear_select()
self.redraw()
self.set_dirty(True)
def activate_node_controls(self):
        '''Make only node-compatible selection-specific controls sensitive to input.'''
#enable the enter sidebar and related controls
self.activate_all_controls()
#hide rel controls
self.builder.get_object("relbox").hide()
self.builder.get_object("frombox").hide()
self.builder.get_object("tobox").hide()
self.builder.get_object("weightbox").hide()
def activate_rel_controls(self):
'''Show relationship controls.'''
self.activate_all_controls()
#show rel controls
self.builder.get_object("relbox").show()
self.builder.get_object("frombox").show()
self.builder.get_object("tobox").show()
self.builder.get_object("weightbox").show()
def activate_all_controls(self):
'''Make all selection-specific controls sensitive to input.'''
self.builder.get_object("sidebarbox").set_sensitive(True)
self.builder.get_object("add_attr").set_sensitive(True)
self.builder.get_object("remove_attr").set_sensitive(True)
#explicitly enable relationship-only controls, since they may have been previously explicitly disabled
self.builder.get_object("relbox").set_sensitive(True)
self.builder.get_object("frombox").set_sensitive(True)
self.builder.get_object("tobox").set_sensitive(True)
self.builder.get_object("weightbox").set_sensitive(True)
#also enable selection-specific buttons and menu items
self.builder.get_object("copy").set_sensitive(True)
self.builder.get_object("paste").set_sensitive(True)
self.builder.get_object("del").set_sensitive(True)
self.builder.get_object("menu_copy").set_sensitive(True)
self.builder.get_object("menu_paste").set_sensitive(True)
self.builder.get_object("menu_delete").set_sensitive(True)
def disable_all_controls(self):
'''Make all selection-specific controls unresponsive to input and clear their values.'''
self.builder.get_object("sidebarbox").set_sensitive(False)
#clear values
self.builder.get_object("name_entry").set_text('')
self.from_main.set_text('')
self.to_main.set_text('')
self.builder.get_object("weight_spin").set_value(5)
self.notes_buff.set_text('')
self.notes_buff.clear_undo()
self.notes_check_undo()
#explicitly disable
self.builder.get_object("frombox").set_sensitive(False)
self.builder.get_object("tobox").set_sensitive(False)
self.builder.get_object("weightbox").set_sensitive(False)
self.builder.get_object("add_attr").set_sensitive(False)
self.builder.get_object("remove_attr").set_sensitive(False)
#disable buttons and menu items
self.builder.get_object("copy").set_sensitive(False)
self.builder.get_object("paste").set_sensitive(False)
self.builder.get_object("del").set_sensitive(False)
self.builder.get_object("menu_copy").set_sensitive(False)
self.builder.get_object("menu_paste").set_sensitive(False)
self.builder.get_object("menu_delete").set_sensitive(False)
def node_clicked(self, selobj, obj=None, event=None):
'''Event handler. Select and otherwise perform UI actions on node click.'''
self.set_selection(selobj)
btn = event.get_button()
#TODO handle the right-click menu
if btn[1] == 3L:
self.show_dev_error()
#use menu.popup function
def line_clicked(self, selobj, obj=None, event=None):
'''Event handler. Draw clicked edge as "selected".'''
self.set_selection(selobj)
btn = event.get_button()
#TODO handle the right-click menu
if btn[1] == 3L:
self.show_dev_error()
#use menu.popup function
def canvas_clicked(self, canvas, obj=None, event=None):
'''Event handler. Set keyboard focus and clear selection on canvas click.'''
self.clear_select()
self.builder.get_object("canvas_eventbox").grab_focus() #set keyboard focus
def canvas_key_handler(self, widget, event=None):
'''Event handler. Take actions based on keyboard input while a graph object is selected.'''
kvn = Gdk.keyval_name(event.keyval)
if kvn == '1':
self.zoom_reset()
return True
elif kvn == 'plus':
self.zoom_in_step()
return True
elif kvn == 'minus':
self.zoom_out_step()
return True
elif kvn == 'Delete':
self.delete_selection()
return True
elif kvn == 'Escape':
self.clear_select()
return True
elif kvn == 'Right' or kvn == 'Left':
val = self.hscroll.get_value()
adj = self.hscroll.get_step_increment()
if kvn == 'Right':
self.hscroll.set_value(val+adj)
else:
self.hscroll.set_value(val-adj)
return True
elif kvn == 'Up' or kvn == 'Down':
val = self.vscroll.get_value()
adj = self.vscroll.get_step_increment()
if kvn == 'Down':
self.vscroll.set_value(val+adj)
else:
self.vscroll.set_value(val-adj)
return True
def scroll_handler(self, widget, event=None):
'''Event handler. Change scroll action based on various contexts.'''
#TODO get masks from prefs
horiz_mask = Gdk.ModifierType.SHIFT_MASK
zoom_mask = Gdk.ModifierType.CONTROL_MASK
if event.state & horiz_mask:
val = self.hscroll.get_value() #minus step increment, capped at zero
adj = self.hscroll.get_step_increment()
if event.direction == Gdk.ScrollDirection.UP:
#scroll left
self.hscroll.set_value(val-adj)
elif event.direction == Gdk.ScrollDirection.DOWN:
#scroll right
self.hscroll.set_value(val+adj)
return True
elif event.state & zoom_mask:
if event.direction == Gdk.ScrollDirection.UP:
self.zoom_in_step()
elif event.direction == Gdk.ScrollDirection.DOWN:
self.zoom_out_step()
return True
#unless we handled it, let the event flow along
return False
def update_zoom(self, widget, data=None):
'''Event handler. Set scale to current adjustment value.'''
val = self.scale_adj.get_value()
self.canvas.set_scale(val / 100)
def zoom_in_step(self, widget=None, data=None):
'''Event handler. Enlarge scale by 20%.'''
val = self.scale_adj.get_value()
self.scale_adj.set_value(val + 20)
def zoom_out_step(self, widget=None, data=None):
'''Event handler. Shrink scale by 20%.'''
val = self.scale_adj.get_value()
self.scale_adj.set_value(val - 20)
def zoom_reset(self, widget=None, data=None):
'''Event handler. Set scale to 1.'''
self.scale_adj.set_value(100)
def zoom_fit(self, widget=None, data=None):
'''Event handler and standalone. Calculate optimal scale value to show entire area at once.'''
#get the visible window dimensions
vis_w = self.hscroll.get_page_size()
vis_h = self.vscroll.get_page_size()
#add padding to the graph width and height to account for invisible spacing rings
bounds = self.canvas.get_bounds()
graph_w = bounds.x2 - bounds.x1 + 20
graph_h = bounds.y2 - bounds.y1 + 20
if graph_w == 0 or graph_h == 0:
return
        xscale = vis_w / graph_w
        yscale = vis_h / graph_h
#use the smaller of the two scales so we fit as much as possible
scale = min(xscale, yscale) * 100
self.scale_adj.set_value(scale)
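    # Illustrative numbers (assumed): an 800x600 viewport and 1580x580 graph
    # bounds pad to 1600x600, giving xscale 0.5 and yscale 1.0; the smaller
    # one wins, so the adjustment is set to 50 (i.e. 50% zoom).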
def check_label(self, widget, data=None):
'''Event handler. Warn if edited label is already used.'''
if widget == None or self.seltype != 'node':
return
newlbl = widget.get_text()
if newlbl != self.seldata.label and newlbl in self.G:
widget.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, Gtk.STOCK_DIALOG_ERROR)
widget.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("Label already used"))
widget.set_icon_activatable(Gtk.EntryIconPosition.SECONDARY, False)
else:
widget.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
def cancel_name_edit(self, widget, data=None):
'''Event handler. Reset name field if Esc key pressed.'''
kvn = Gdk.keyval_name(data.keyval)
if kvn == 'Escape':
widget.set_text(self.seldata.label)
def update_lbl(self, widget, data=None):
'''Event handler. Update selection's label and redraw it.'''
if self.selection == None:
return
newlbl = widget.get_text()
oldlbl = self.seldata.label
if oldlbl == newlbl:
return
#reset field if it was submitted blank
if newlbl == "":
widget.set_text(oldlbl)
return
if self.seltype == 'node':
#change the internal object's label
self.seldata.label = newlbl
#remove old label from the liststore and add the new one
for row in self.node_lbl_store:
if row[0] == oldlbl:
self.node_lbl_store.remove(row.iter)
break
self.node_lbl_store.append([newlbl])
#update internal relationship objects' to and from node labels
for n in self.G[oldlbl]:
for rel in self.G[oldlbl][n]['rels']:
if rel.from_node == oldlbl: rel.from_node = newlbl
if rel.to_node == oldlbl: rel.to_node = newlbl
#change the graph node's key
nx.relabel_nodes(self.G, {oldlbl:newlbl}, False)
else:
self.seldata.label = newlbl
tlbl = self.seldata.to_node
flbl = self.seldata.from_node
self.refresh(self.seldata, oldlbl)
self.set_dirty(True)
def check_endpoint(self, widget, data=None):
'''Event handler. Warn if desired endpoint does not exist.'''
if widget == None or self.seltype != 'edge':
return
newlbl = widget.get_text()
if newlbl not in self.G:
widget.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, Gtk.STOCK_DIALOG_ERROR)
widget.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, _("No such node"))
widget.set_icon_activatable(Gtk.EntryIconPosition.SECONDARY, False)
else:
widget.set_icon_from_stock(Gtk.EntryIconPosition.SECONDARY, None)
def cancel_endpoint(self, widget, data=None):
'''Event handler. Reset name field if Esc key pressed.'''
kvn = Gdk.keyval_name(data.keyval)
if kvn == 'Escape':
widget.set_text(self.seldata.from_node)
def update_origin(self, widget, data=None):
'''Event handler. Update selected relationship's origin.'''
if self.selection == None:
return
new_origin = widget.get_text()
if new_origin == self.seldata.from_node:
return
#reset field if it was submitted blank
if new_origin == "":
widget.set_text(self.seldata.from_node)
return
#if our new origin is our old destination, swap the two
if new_origin == self.seldata.to_node:
self.G.move_rel(self.seldata, origin=new_origin, dest=self.seldata.from_node)
#now that it's been updated, we can just assign the real to_node value
self.to_main.set_text(self.seldata.to_node)
#just refresh in this case
self.refresh(self.seldata)
else:
#otherwise, change the origin only
self.G.move_rel(self.seldata, origin=new_origin)
self.redraw()
self.set_dirty(True)
def update_dest(self, widget, data=None):
'''Event handler. Update selected relationship's destination.'''
if self.selection == None:
return
new_dest = widget.get_text()
if new_dest == self.seldata.to_node:
return
#reset field if it was submitted blank
if new_dest == "":
widget.set_text(self.seldata.to_node)
return
        #if our new destination is our old origin, swap the two
if new_dest == self.seldata.from_node:
self.G.move_rel(self.seldata, origin=self.seldata.to_node, dest=new_dest)
            #now that it's been updated, we can just assign the real from_node value
self.from_main.set_text(self.seldata.from_node)
#just refresh in this case
self.refresh(self.seldata)
else:
            #otherwise, change the destination only
self.G.move_rel(self.seldata, dest=new_dest)
self.redraw()
self.set_dirty(True)
def update_weight(self, widget, data=None):
'''Event handler. Update selected relationship's weight and redraw it.'''
if self.selection == None: return
oldw = self.seldata.weight
neww = widget.get_value()
if oldw == neww: return
self.seldata.weight = neww
#refresh instead of fully redrawing
self.refresh(self.seldata)
self.set_dirty(True)
def update_bidir(self, widget, data=None):
'''Event handler. Update selected relationship's bidir property and redraw it.'''
if self.selection == None: return
        oldb = self.seldata.mutual
newb = widget.get_active()
if oldb == newb: return
self.seldata.mutual = newb
#refresh instead of fully redrawing
self.refresh(self.seldata)
self.set_dirty(True)
def update_notes(self, widget=None, data=None):
'''Event handler and standalone. Update notes field of selected object.'''
if self.selection == None: return
start = self.notes_buff.get_iter_at_offset(0)
end = self.notes_buff.get_iter_at_offset(-1)
self.seldata.notes = self.notes_buff.get_text(start, end, False)
def toggle_widget(self, widget, data=None):
'''Event handler and standalone. Toggle passed widget.'''
# The widget arg is populated correctly in Glade using the Custom Data
# field as the target, and marking the Switch attribute.
widget.set_visible(not widget.get_visible())
def toggle_fullscreen(self, widget, data=None):
'''Event handler and standalone. Toggles the app's Fullscreen state.'''
if self.is_fullscreen:
self.window.unfullscreen()
else:
self.window.fullscreen()
def track_fullscreen(self, widget, data=None):
'''Event handler. Tracks the app's fullscreen state.'''
mask = Gdk.WindowState.FULLSCREEN
self.is_fullscreen = (widget.get_window().get_state() & mask) == mask
def show_dlg(self, widget, data=None):
'''Event handler and standalone. Run, then hide the dialog box passed as widget.'''
# The widget arg is populated correctly in Glade using the Custom Data
# field as the target dialog, and marking the Switch attribute.
widget.run()
widget.hide()
def show_dup_node_error(self):
'''Show the Duplicate Node error dialog.'''
self.dup_err_dlg.run()
self.dup_err_dlg.hide()
def show_blank_err(self):
'''Show the Blank Label error dialog.'''
self.blank_err_dlg.run()
self.blank_err_dlg.hide()
def show_dev_error(self, widget=None, data=None, other=None):
'''Event handler and standalone. Show the Not Implemented dialog.'''
print widget
self.not_implemented_box.run()
self.not_implemented_box.hide()
def redraw(self, widget=None, data=None):
'''Event handler and standalone. Trigger a graph update and redraw.'''
seltype = None
if self.seltype == 'node':
seltype = 'node'
lbl = self.seldata.label
elif self.seltype == 'edge':
seltype = 'edge'
rel = self.seldata
#get edge selection data
tlbl = self.seldata.to_node
flbl = self.seldata.from_node
self.canvas.scroll_to(0, 0)
self.canvas.redraw(self.G)
#reset the cursor
rwin = self.builder.get_object("canvas_eventbox").get_window()
rwin.set_cursor(None)
#get back our selection
if seltype != None:
if seltype == 'node':
self.set_selection(self.canvas.get_vertex(lbl))
else:
self.set_selection(self.canvas.get_edge(tlbl, flbl))
if rel in self.selection.rels:
num = self.selection.rels.index(rel)
self.pick_rel(relnum = num)
self.builder.get_object("rel_combo").set_active(num)
#center the selection
self.center_on(self.selection)
def refresh(self, touch, oldlbl=None):
'''Redraw the diagram without updating node positions.'''
self.selection.set_selected(False)
self.canvas.refresh(touch, oldlbl)
self.selection.set_selected(True)
def _(text):
'''Get translated text where possible.'''
#TODO grab translated text
return text
def main():
'''Enter Gtk.main().'''
Gtk.main()
return
if __name__ == "__main__":
soc = Sociogram()
main()
| aurule/Sociogram | src/sociogram.py | Python | apache-2.0 | 60,889 |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Results of coverage measurement."""
import collections
from coverage.backward import iitems
from coverage.misc import contract, format_lines, SimpleRepr
class Analysis(object):
"""The results of analyzing a FileReporter."""
def __init__(self, data, file_reporter):
self.data = data
self.file_reporter = file_reporter
self.filename = self.file_reporter.filename
self.statements = self.file_reporter.lines()
self.excluded = self.file_reporter.excluded_lines()
# Identify missing statements.
executed = self.data.lines(self.filename) or []
executed = self.file_reporter.translate_lines(executed)
self.missing = self.statements - executed
if self.data.has_arcs():
self._arc_possibilities = sorted(self.file_reporter.arcs())
self.exit_counts = self.file_reporter.exit_counts()
self.no_branch = self.file_reporter.no_branch_lines()
n_branches = self.total_branches()
mba = self.missing_branch_arcs()
n_partial_branches = sum(len(v) for k,v in iitems(mba) if k not in self.missing)
n_missing_branches = sum(len(v) for k,v in iitems(mba))
else:
self._arc_possibilities = []
self.exit_counts = {}
self.no_branch = set()
n_branches = n_partial_branches = n_missing_branches = 0
self.numbers = Numbers(
n_files=1,
n_statements=len(self.statements),
n_excluded=len(self.excluded),
n_missing=len(self.missing),
n_branches=n_branches,
n_partial_branches=n_partial_branches,
n_missing_branches=n_missing_branches,
)
def missing_formatted(self):
"""The missing line numbers, formatted nicely.
Returns a string like "1-2, 5-11, 13-14".
"""
return format_lines(self.statements, self.missing)
def has_arcs(self):
"""Were arcs measured in this result?"""
return self.data.has_arcs()
def arc_possibilities(self):
"""Returns a sorted list of the arcs in the code."""
return self._arc_possibilities
def arcs_executed(self):
"""Returns a sorted list of the arcs actually executed in the code."""
executed = self.data.arcs(self.filename) or []
executed = self.file_reporter.translate_arcs(executed)
return sorted(executed)
def arcs_missing(self):
"""Returns a sorted list of the arcs in the code not executed."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
missing = (
p for p in possible
if p not in executed
and p[0] not in self.no_branch
)
return sorted(missing)
def arcs_missing_formatted(self):
"""The missing branch arcs, formatted nicely.
Returns a string like "1->2, 1->3, 16->20". Omits any mention of
branches from missing lines, so if line 17 is missing, then 17->18
won't be included.
"""
arcs = self.missing_branch_arcs()
missing = self.missing
line_exits = sorted(iitems(arcs))
pairs = []
for line, exits in line_exits:
for ex in sorted(exits):
if line not in missing:
pairs.append("%d->%s" % (line, (ex if ex > 0 else "exit")))
return ', '.join(pairs)
def arcs_unpredicted(self):
"""Returns a sorted list of the executed arcs missing from the code."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
# Exclude arcs here which connect a line to itself. They can occur
# in executed data in some cases. This is where they can cause
# trouble, and here is where it's the least burden to remove them.
# Also, generators can somehow cause arcs from "enter" to "exit", so
# make sure we have at least one positive value.
unpredicted = (
e for e in executed
if e not in possible
and e[0] != e[1]
and (e[0] > 0 or e[1] > 0)
)
return sorted(unpredicted)
def branch_lines(self):
"""Returns a list of line numbers that have more than one exit."""
        return [l1 for l1, count in iitems(self.exit_counts) if count > 1]
def total_branches(self):
"""How many total branches are there?"""
return sum(count for count in self.exit_counts.values() if count > 1)
def missing_branch_arcs(self):
"""Return arcs that weren't executed from branch lines.
Returns {l1:[l2a,l2b,...], ...}
"""
missing = self.arcs_missing()
branch_lines = set(self.branch_lines())
mba = collections.defaultdict(list)
for l1, l2 in missing:
if l1 in branch_lines:
mba[l1].append(l2)
return mba
def branch_stats(self):
"""Get stats about branches.
Returns a dict mapping line numbers to a tuple:
(total_exits, taken_exits).
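        For example, a branch line with three exits where one exit was
        never taken maps to (3, 2).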
"""
missing_arcs = self.missing_branch_arcs()
stats = {}
for lnum in self.branch_lines():
exits = self.exit_counts[lnum]
try:
missing = len(missing_arcs[lnum])
except KeyError:
missing = 0
stats[lnum] = (exits, exits - missing)
return stats
class Numbers(SimpleRepr):
"""The numerical results of measuring coverage.
This holds the basic statistics from `Analysis`, and is used to roll
up statistics across files.
"""
# A global to determine the precision on coverage percentages, the number
# of decimal places.
_precision = 0
_near0 = 1.0 # These will change when _precision is changed.
_near100 = 99.0
def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
n_branches=0, n_partial_branches=0, n_missing_branches=0
):
self.n_files = n_files
self.n_statements = n_statements
self.n_excluded = n_excluded
self.n_missing = n_missing
self.n_branches = n_branches
self.n_partial_branches = n_partial_branches
self.n_missing_branches = n_missing_branches
def init_args(self):
"""Return a list for __init__(*args) to recreate this object."""
return [
self.n_files, self.n_statements, self.n_excluded, self.n_missing,
self.n_branches, self.n_partial_branches, self.n_missing_branches,
]
@classmethod
def set_precision(cls, precision):
"""Set the number of decimal places used to report percentages."""
assert 0 <= precision < 10
cls._precision = precision
cls._near0 = 1.0 / 10**precision
cls._near100 = 100.0 - cls._near0
@property
def n_executed(self):
"""Returns the number of executed statements."""
return self.n_statements - self.n_missing
@property
def n_executed_branches(self):
"""Returns the number of executed branches."""
return self.n_branches - self.n_missing_branches
@property
def pc_covered(self):
"""Returns a single percentage value for coverage."""
if self.n_statements > 0:
numerator, denominator = self.ratio_covered
pc_cov = (100.0 * numerator) / denominator
else:
pc_cov = 100.0
return pc_cov
@property
def pc_covered_str(self):
"""Returns the percent covered, as a string, without a percent sign.
Note that "0" is only returned when the value is truly zero, and "100"
is only returned when the value is truly 100. Rounding can never
result in either "0" or "100".
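        For example, with the default precision of 0, an actual coverage of
        99.9% renders as "99" and 0.04% renders as "1".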
"""
pc = self.pc_covered
if 0 < pc < self._near0:
pc = self._near0
elif self._near100 < pc < 100:
pc = self._near100
else:
pc = round(pc, self._precision)
return "%.*f" % (self._precision, pc)
@classmethod
def pc_str_width(cls):
"""How many characters wide can pc_covered_str be?"""
width = 3 # "100"
if cls._precision > 0:
width += 1 + cls._precision
return width
@property
def ratio_covered(self):
"""Return a numerator and denominator for the coverage ratio."""
numerator = self.n_executed + self.n_executed_branches
denominator = self.n_statements + self.n_branches
return numerator, denominator
def __add__(self, other):
nums = Numbers()
nums.n_files = self.n_files + other.n_files
nums.n_statements = self.n_statements + other.n_statements
nums.n_excluded = self.n_excluded + other.n_excluded
nums.n_missing = self.n_missing + other.n_missing
nums.n_branches = self.n_branches + other.n_branches
nums.n_partial_branches = (
self.n_partial_branches + other.n_partial_branches
)
nums.n_missing_branches = (
self.n_missing_branches + other.n_missing_branches
)
return nums
def __radd__(self, other):
# Implementing 0+Numbers allows us to sum() a list of Numbers.
if other == 0:
return self
return NotImplemented
@contract(total='number', fail_under='number', precision=int, returns=bool)
def should_fail_under(total, fail_under, precision):
"""Determine if a total should fail due to fail-under.
`total` is a float, the coverage measurement total. `fail_under` is the
fail_under setting to compare with. `precision` is the number of digits
to consider after the decimal point.
Returns True if the total should fail.
"""
# Special case for fail_under=100, it must really be 100.
if fail_under == 100.0 and total != 100.0:
return True
return round(total, precision) < fail_under
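# A minimal usage sketch of the classes above (values illustrative): Numbers
# instances combine with "+" and sum(), and should_fail_under() applies the
# fail-under rule to the rolled-up percentage.
if __name__ == "__main__":
    a = Numbers(n_files=1, n_statements=200, n_missing=20)
    b = Numbers(n_files=1, n_statements=100)
    total = sum([a, b])                  # __radd__ makes sum() work
    print(total.pc_covered_str)          # "93": 280 of 300 statements
    print(should_fail_under(total.pc_covered, 95, 0))  # True: 93 < 95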
| blueyed/coveragepy | coverage/results.py | Python | apache-2.0 | 10,222 |
# Copyright 2013 IBM Corp.
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
import math
import time
from oslo.serialization import jsonutils
from oslo.utils import strutils
import six
import webob
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack import versioned_method
from nova import exception
from nova import i18n
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.openstack.common import log as logging
from nova import utils
from nova import wsgi
LOG = logging.getLogger(__name__)
_SUPPORTED_CONTENT_TYPES = (
'application/json',
'application/vnd.openstack.compute+json',
)
_MEDIA_TYPE_MAP = {
'application/vnd.openstack.compute+json': 'json',
'application/json': 'json',
}
# These are typically automatically created by routes as either defaults
# collection or member methods.
_ROUTES_METHODS = [
'create',
'delete',
'show',
'update',
]
_METHODS_WITH_BODY = [
'POST',
'PUT',
]
# The default api version request if none is requested in the headers
# Note(cyeoh): This only applies for the v2.1 API once microversions
# support is fully merged. It does not affect the V2 API.
DEFAULT_API_VERSION = "2.1"
# name of attribute to keep version method information
VER_METHOD_ATTR = 'versioned_methods'
# Name of header used by clients to request a specific version
# of the REST API
API_VERSION_REQUEST_HEADER = 'X-OpenStack-Compute-API-Version'
def get_supported_content_types():
return _SUPPORTED_CONTENT_TYPES
def get_media_map():
return dict(_MEDIA_TYPE_MAP.items())
class Request(webob.Request):
"""Add some OpenStack API-specific logic to the base webob.Request."""
def __init__(self, *args, **kwargs):
super(Request, self).__init__(*args, **kwargs)
self._extension_data = {'db_items': {}}
if not hasattr(self, 'api_version_request'):
self.api_version_request = api_version.APIVersionRequest()
def cache_db_items(self, key, items, item_key='id'):
"""Allow API methods to store objects from a DB query to be
used by API extensions within the same API request.
An instance of this class only lives for the lifetime of a
single API request, so there's no need to implement full
cache management.
"""
db_items = self._extension_data['db_items'].setdefault(key, {})
for item in items:
db_items[item[item_key]] = item
def get_db_items(self, key):
"""Allow an API extension to get previously stored objects within
the same API request.
Note that the object data will be slightly stale.
"""
return self._extension_data['db_items'][key]
def get_db_item(self, key, item_key):
"""Allow an API extension to get a previously stored object
within the same API request.
Note that the object data will be slightly stale.
"""
return self.get_db_items(key).get(item_key)
def cache_db_instances(self, instances):
self.cache_db_items('instances', instances, 'uuid')
def cache_db_instance(self, instance):
self.cache_db_items('instances', [instance], 'uuid')
def get_db_instances(self):
return self.get_db_items('instances')
def get_db_instance(self, instance_uuid):
return self.get_db_item('instances', instance_uuid)
def cache_db_flavors(self, flavors):
self.cache_db_items('flavors', flavors, 'flavorid')
def cache_db_flavor(self, flavor):
self.cache_db_items('flavors', [flavor], 'flavorid')
def get_db_flavors(self):
return self.get_db_items('flavors')
def get_db_flavor(self, flavorid):
return self.get_db_item('flavors', flavorid)
def cache_db_compute_nodes(self, compute_nodes):
self.cache_db_items('compute_nodes', compute_nodes, 'id')
def cache_db_compute_node(self, compute_node):
self.cache_db_items('compute_nodes', [compute_node], 'id')
def get_db_compute_nodes(self):
return self.get_db_items('compute_nodes')
def get_db_compute_node(self, id):
return self.get_db_item('compute_nodes', id)
def best_match_content_type(self):
"""Determine the requested response content-type."""
if 'nova.best_content_type' not in self.environ:
# Calculate the best MIME type
content_type = None
# Check URL path suffix
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
possible_type = 'application/' + parts[1]
if possible_type in get_supported_content_types():
content_type = possible_type
if not content_type:
content_type = self.accept.best_match(
get_supported_content_types())
self.environ['nova.best_content_type'] = (content_type or
'application/json')
return self.environ['nova.best_content_type']
def get_content_type(self):
"""Determine content type of the request body.
Does not do any body introspection, only checks header
"""
if "Content-Type" not in self.headers:
return None
content_type = self.content_type
# NOTE(markmc): text/plain is the default for eventlet and
# other webservers which use mimetools.Message.gettype()
# whereas twisted defaults to ''.
if not content_type or content_type == 'text/plain':
return None
if content_type not in get_supported_content_types():
raise exception.InvalidContentType(content_type=content_type)
return content_type
def best_match_language(self):
"""Determine the best available language for the request.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
return self.accept_language.best_match(
i18n.get_available_languages())
def set_api_version_request(self):
"""Set API version request based on the request header information."""
if API_VERSION_REQUEST_HEADER in self.headers:
hdr_string = self.headers[API_VERSION_REQUEST_HEADER]
# 'latest' is a special keyword which is equivalent to requesting
# the maximum version of the API supported
if hdr_string == 'latest':
self.api_version_request = api_version.max_api_version()
else:
self.api_version_request = api_version.APIVersionRequest(
hdr_string)
# Check that the version requested is within the global
# minimum/maximum of supported API versions
if not self.api_version_request.matches(
api_version.min_api_version(),
api_version.max_api_version()):
raise exception.InvalidGlobalAPIVersion(
req_ver=self.api_version_request.get_string(),
min_ver=api_version.min_api_version().get_string(),
max_ver=api_version.max_api_version().get_string())
else:
self.api_version_request = api_version.APIVersionRequest(
api_version.DEFAULT_API_VERSION)
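    # Illustrative request flow (header value hypothetical): a client pinning
    # microversion 2.12 sends
    #
    #     X-OpenStack-Compute-API-Version: 2.12
    #
    # and set_api_version_request() validates it against the supported range;
    # omitting the header selects api_version.DEFAULT_API_VERSION instead.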
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
return jsonutils.dumps(data)
def serializers(**serializers):
"""Attaches serializers to a method.
This decorator associates a dictionary of serializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_serializers'):
func.wsgi_serializers = {}
func.wsgi_serializers.update(serializers)
return func
return decorator
def deserializers(**deserializers):
"""Attaches deserializers to a method.
This decorator associates a dictionary of deserializers with a
method. Note that the function attributes are directly
manipulated; the method is not wrapped.
"""
def decorator(func):
if not hasattr(func, 'wsgi_deserializers'):
func.wsgi_deserializers = {}
func.wsgi_deserializers.update(deserializers)
return func
return decorator
def response(code):
"""Attaches response code to a method.
This decorator associates a response code with a method. Note
that the function attributes are directly manipulated; the method
is not wrapped.
"""
def decorator(func):
func.wsgi_code = code
return func
return decorator
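# Illustrative use of the decorators above (controller and method names
# hypothetical): stacking them binds a status code and a serializer to an
# API method by setting function attributes, without wrapping the method.
#
#     class WidgetController(object):
#         @response(202)
#         @serializers(json=JSONDictSerializer)
#         def create(self, req, body):
#             ...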
class ResponseObject(object):
"""Bundles a response object with appropriate serializers.
Object that app methods may return in order to bind alternate
serializers with a response object to be serialized. Its use is
optional.
"""
def __init__(self, obj, code=None, headers=None, **serializers):
"""Binds serializers with an object.
Takes keyword arguments akin to the @serializer() decorator
for specifying serializers. Serializers specified will be
given preference over default serializers or method-specific
serializers on return.
"""
self.obj = obj
self.serializers = serializers
self._default_code = 200
self._code = code
self._headers = headers or {}
self.serializer = None
self.media_type = None
def __getitem__(self, key):
"""Retrieves a header with the given name."""
return self._headers[key.lower()]
def __setitem__(self, key, value):
"""Sets a header with the given name to the given value."""
self._headers[key.lower()] = value
def __delitem__(self, key):
"""Deletes the header with the given name."""
del self._headers[key.lower()]
def _bind_method_serializers(self, meth_serializers):
"""Binds method serializers with the response object.
Binds the method serializers with the response object.
Serializers specified to the constructor will take precedence
over serializers specified to this method.
:param meth_serializers: A dictionary with keys mapping to
response types and values containing
serializer objects.
"""
# We can't use update because that would be the wrong
# precedence
for mtype, serializer in meth_serializers.items():
self.serializers.setdefault(mtype, serializer)
def get_serializer(self, content_type, default_serializers=None):
"""Returns the serializer for the wrapped object.
Returns the serializer for the wrapped object subject to the
indicated content type. If no serializer matching the content
type is attached, an appropriate serializer drawn from the
default serializers will be used. If no appropriate
serializer is available, raises InvalidContentType.
"""
default_serializers = default_serializers or {}
try:
mtype = get_media_map().get(content_type, content_type)
if mtype in self.serializers:
return mtype, self.serializers[mtype]
else:
return mtype, default_serializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def preserialize(self, content_type, default_serializers=None):
"""Prepares the serializer that will be used to serialize.
Determines the serializer that will be used and prepares an
instance of it for later call. This allows the serializer to
be accessed by extensions for, e.g., template extension.
"""
mtype, serializer = self.get_serializer(content_type,
default_serializers)
self.media_type = mtype
self.serializer = serializer()
def attach(self, **kwargs):
"""Attach slave templates to serializers."""
if self.media_type in kwargs:
self.serializer.attach(kwargs[self.media_type])
def serialize(self, request, content_type, default_serializers=None):
"""Serializes the wrapped object.
Utility method for serializing the wrapped object. Returns a
webob.Response object.
"""
if self.serializer:
serializer = self.serializer
else:
_mtype, _serializer = self.get_serializer(content_type,
default_serializers)
serializer = _serializer()
response = webob.Response()
response.status_int = self.code
for hdr, value in self._headers.items():
response.headers[hdr] = utils.utf8(str(value))
response.headers['Content-Type'] = utils.utf8(content_type)
if self.obj is not None:
response.body = serializer.serialize(self.obj)
return response
@property
def code(self):
"""Retrieve the response status."""
return self._code or self._default_code
@property
def headers(self):
"""Retrieve the headers."""
return self._headers.copy()
def action_peek_json(body):
"""Determine action to invoke."""
try:
decoded = jsonutils.loads(body)
except ValueError:
msg = _("cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
# Make sure there's exactly one key...
if len(decoded) != 1:
msg = _("too many body keys")
raise exception.MalformedRequestBody(reason=msg)
# Return the action and the decoded body...
return decoded.keys()[0]
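# Illustrative input (body hypothetical): given '{"resize": {"flavorRef": "2"}}'
# action_peek_json() returns 'resize', the single top-level key.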
class ResourceExceptionHandler(object):
"""Context manager to handle Resource exceptions.
Used when processing exceptions generated by API implementation
methods (or their extensions). Converts most exceptions to Fault
exceptions, with the appropriate logging.
"""
def __enter__(self):
return None
def __exit__(self, ex_type, ex_value, ex_traceback):
if not ex_value:
return True
if isinstance(ex_value, exception.Forbidden):
raise Fault(webob.exc.HTTPForbidden(
explanation=ex_value.format_message()))
elif isinstance(ex_value, exception.Invalid):
raise Fault(exception.ConvertedException(
code=ex_value.code,
explanation=ex_value.format_message()))
elif isinstance(ex_value, TypeError):
exc_info = (ex_type, ex_value, ex_traceback)
LOG.error(_LE('Exception handling resource: %s'), ex_value,
exc_info=exc_info)
raise Fault(webob.exc.HTTPBadRequest())
elif isinstance(ex_value, Fault):
LOG.info(_LI("Fault thrown: %s"), ex_value)
raise ex_value
elif isinstance(ex_value, webob.exc.HTTPException):
LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
raise Fault(ex_value)
# We didn't handle the exception
return False
class Resource(wsgi.Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
serialized by requested content type.
Exceptions derived from webob.exc.HTTPException will be automatically
wrapped in Fault() to provide API friendly error responses.
"""
support_api_request_version = False
def __init__(self, controller, action_peek=None, inherits=None,
**deserializers):
""":param controller: object that implement methods created by routes
lib
:param action_peek: dictionary of routines for peeking into an
action request body to determine the
desired action
:param inherits: another resource object that this resource should
inherit extensions from. Any action extensions that
are applied to the parent resource will also apply
to this resource.
"""
self.controller = controller
default_deserializers = dict(json=JSONDeserializer)
default_deserializers.update(deserializers)
self.default_deserializers = default_deserializers
self.default_serializers = dict(json=JSONDictSerializer)
self.action_peek = dict(json=action_peek_json)
self.action_peek.update(action_peek or {})
# Copy over the actions dictionary
self.wsgi_actions = {}
if controller:
self.register_actions(controller)
# Save a mapping of extensions
self.wsgi_extensions = {}
self.wsgi_action_extensions = {}
self.inherits = inherits
def register_actions(self, controller):
"""Registers controller actions with this resource."""
actions = getattr(controller, 'wsgi_actions', {})
for key, method_name in actions.items():
self.wsgi_actions[key] = getattr(controller, method_name)
def register_extensions(self, controller):
"""Registers controller extensions with this resource."""
extensions = getattr(controller, 'wsgi_extensions', [])
for method_name, action_name in extensions:
# Look up the extending method
extension = getattr(controller, method_name)
if action_name:
# Extending an action...
if action_name not in self.wsgi_action_extensions:
self.wsgi_action_extensions[action_name] = []
self.wsgi_action_extensions[action_name].append(extension)
else:
# Extending a regular method
if method_name not in self.wsgi_extensions:
self.wsgi_extensions[method_name] = []
self.wsgi_extensions[method_name].append(extension)
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
# NOTE(Vek): Check for get_action_args() override in the
# controller
if hasattr(self.controller, 'get_action_args'):
return self.controller.get_action_args(request_environment)
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except (KeyError, IndexError, AttributeError):
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
def get_body(self, request):
try:
content_type = request.get_content_type()
except exception.InvalidContentType:
LOG.debug("Unrecognized Content-Type provided in request")
return None, ''
return content_type, request.body
def deserialize(self, meth, content_type, body):
meth_deserializers = getattr(meth, 'wsgi_deserializers', {})
try:
mtype = get_media_map().get(content_type, content_type)
if mtype in meth_deserializers:
deserializer = meth_deserializers[mtype]
else:
deserializer = self.default_deserializers[mtype]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
if (hasattr(deserializer, 'want_controller')
and deserializer.want_controller):
return deserializer(self.controller).deserialize(body)
else:
return deserializer().deserialize(body)
def pre_process_extensions(self, extensions, request, action_args):
# List of callables for post-processing extensions
post = []
for ext in extensions:
if inspect.isgeneratorfunction(ext):
response = None
# If it's a generator function, the part before the
# yield is the preprocessing stage
try:
with ResourceExceptionHandler():
gen = ext(req=request, **action_args)
response = gen.next()
except Fault as ex:
response = ex
# We had a response...
if response:
return response, []
# No response, queue up generator for post-processing
post.append(gen)
else:
# Regular functions only perform post-processing
post.append(ext)
# Run post-processing in the reverse order
return None, reversed(post)
def post_process_extensions(self, extensions, resp_obj, request,
action_args):
for ext in extensions:
response = None
if inspect.isgenerator(ext):
# If it's a generator, run the second half of
# processing
try:
with ResourceExceptionHandler():
response = ext.send(resp_obj)
except StopIteration:
# Normal exit of generator
continue
except Fault as ex:
response = ex
else:
# Regular functions get post-processing...
try:
with ResourceExceptionHandler():
response = ext(req=request, resp_obj=resp_obj,
**action_args)
except Fault as ex:
response = ex
# We had a response...
if response:
return response
return None
def _should_have_body(self, request):
return request.method in _METHODS_WITH_BODY
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
if self.support_api_request_version:
# Set the version of the API requested based on the header
try:
request.set_api_version_request()
except exception.InvalidAPIVersionString as e:
return Fault(webob.exc.HTTPBadRequest(
explanation=e.format_message()))
except exception.InvalidGlobalAPIVersion as e:
return Fault(webob.exc.HTTPNotAcceptable(
explanation=e.format_message()))
# Identify the action, its arguments, and the requested
# content type
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
content_type, body = self.get_body(request)
accept = request.best_match_content_type()
# NOTE(Vek): Splitting the function up this way allows for
# auditing by external tools that wrap the existing
# function. If we try to audit __call__(), we can
# run into troubles due to the @webob.dec.wsgify()
# decorator.
return self._process_stack(request, action, action_args,
content_type, body, accept)
def _process_stack(self, request, action, action_args,
content_type, body, accept):
"""Implement the processing stack."""
# Get the implementing method
try:
meth, extensions = self.get_method(request, action,
content_type, body)
except (AttributeError, TypeError):
return Fault(webob.exc.HTTPNotFound())
except KeyError as ex:
msg = _("There is no such action: %s") % ex.args[0]
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
if body:
msg = _("Action: '%(action)s', calling method: %(meth)s, body: "
"%(body)s") % {'action': action,
'body': unicode(body, 'utf-8'),
'meth': str(meth)}
LOG.debug(strutils.mask_password(msg))
else:
LOG.debug("Calling method '%(meth)s'",
{'meth': str(meth)})
# Now, deserialize the request body...
try:
contents = {}
if self._should_have_body(request):
# allow empty body with PUT and POST
if request.content_length == 0:
contents = {'body': None}
else:
contents = self.deserialize(meth, content_type, body)
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
# Update the action args
action_args.update(contents)
project_id = action_args.pop("project_id", None)
context = request.environ.get('nova.context')
        # The following check is done in quotas.py
"""if (context and project_id and (project_id != context.project_id)):
msg = _("Malformed request URL: URL's project_id '%(project_id)s'"
" doesn't match Context's project_id"
" '%(context_project_id)s'") % \
{'project_id': project_id,
'context_project_id': context.project_id}
return Fault(webob.exc.HTTPBadRequest(explanation=msg))"""
# Run pre-processing extensions
response, post = self.pre_process_extensions(extensions,
request, action_args)
if not response:
try:
with ResourceExceptionHandler():
action_result = self.dispatch(meth, request, action_args)
except Fault as ex:
response = ex
if not response:
# No exceptions; convert action_result into a
# ResponseObject
resp_obj = None
if type(action_result) is dict or action_result is None:
resp_obj = ResponseObject(action_result)
elif isinstance(action_result, ResponseObject):
resp_obj = action_result
else:
response = action_result
# Run post-processing extensions
if resp_obj:
# Do a preserialize to set up the response object
serializers = getattr(meth, 'wsgi_serializers', {})
resp_obj._bind_method_serializers(serializers)
if hasattr(meth, 'wsgi_code'):
resp_obj._default_code = meth.wsgi_code
resp_obj.preserialize(accept, self.default_serializers)
# Process post-processing extensions
response = self.post_process_extensions(post, resp_obj,
request, action_args)
if resp_obj and not response:
response = resp_obj.serialize(request, accept,
self.default_serializers)
if hasattr(response, 'headers'):
for hdr, val in response.headers.items():
# Headers must be utf-8 strings
response.headers[hdr] = utils.utf8(str(val))
if not request.api_version_request.is_null():
response.headers[API_VERSION_REQUEST_HEADER] = \
request.api_version_request.get_string()
response.headers['Vary'] = API_VERSION_REQUEST_HEADER
return response
def get_method(self, request, action, content_type, body):
meth, extensions = self._get_method(request,
action,
content_type,
body)
if self.inherits:
_meth, parent_ext = self.inherits.get_method(request,
action,
content_type,
body)
extensions.extend(parent_ext)
return meth, extensions
def _get_method(self, request, action, content_type, body):
"""Look up the action-specific method and its extensions."""
# Look up the method
try:
if not self.controller:
meth = getattr(self, action)
else:
meth = getattr(self.controller, action)
except AttributeError:
if (not self.wsgi_actions or
action not in _ROUTES_METHODS + ['action']):
# Propagate the error
raise
else:
return meth, self.wsgi_extensions.get(action, [])
if action == 'action':
# OK, it's an action; figure out which action...
mtype = get_media_map().get(content_type)
action_name = self.action_peek[mtype](body)
else:
action_name = action
# Look up the action method
return (self.wsgi_actions[action_name],
self.wsgi_action_extensions.get(action_name, []))
def dispatch(self, method, request, action_args):
"""Dispatch a call to the action-specific method."""
try:
return method(req=request, **action_args)
except exception.VersionNotFoundForAPIMethod:
# We deliberately don't return any message information
# about the exception to the user so it looks as if
# the method is simply not implemented.
return Fault(webob.exc.HTTPNotFound())
class ResourceV21(Resource):
support_api_request_version = True
def action(name):
"""Mark a function as an action.
The given name will be taken as the action key in the body.
This is also overloaded to allow extensions to provide
non-extending definitions of create and delete operations.
"""
def decorator(func):
func.wsgi_action = name
return func
return decorator
def extends(*args, **kwargs):
"""Indicate a function extends an operation.
Can be used as either::
@extends
def index(...):
pass
or as::
@extends(action='resize')
def _action_resize(...):
pass
"""
def decorator(func):
# Store enough information to find what we're extending
func.wsgi_extends = (func.__name__, kwargs.get('action'))
return func
# If we have positional arguments, call the decorator
if args:
return decorator(*args)
# OK, return the decorator instead
return decorator
class ControllerMetaclass(type):
"""Controller metaclass.
This metaclass automates the task of assembling a dictionary
mapping action keys to method names.
"""
def __new__(mcs, name, bases, cls_dict):
"""Adds the wsgi_actions dictionary to the class."""
# Find all actions
actions = {}
extensions = []
versioned_methods = None
# start with wsgi actions from base classes
for base in bases:
actions.update(getattr(base, 'wsgi_actions', {}))
if base.__name__ == "Controller":
# NOTE(cyeoh): This resets the VER_METHOD_ATTR attribute
# between API controller class creations. This allows us
# to use a class decorator on the API methods that doesn't
# require naming explicitly what method is being versioned as
# it can be implicit based on the method decorated. It is a bit
# ugly.
if VER_METHOD_ATTR in base.__dict__:
versioned_methods = getattr(base, VER_METHOD_ATTR)
delattr(base, VER_METHOD_ATTR)
for key, value in cls_dict.items():
if not callable(value):
continue
if getattr(value, 'wsgi_action', None):
actions[value.wsgi_action] = key
elif getattr(value, 'wsgi_extends', None):
extensions.append(value.wsgi_extends)
# Add the actions and extensions to the class dict
cls_dict['wsgi_actions'] = actions
cls_dict['wsgi_extensions'] = extensions
if versioned_methods:
cls_dict[VER_METHOD_ATTR] = versioned_methods
return super(ControllerMetaclass, mcs).__new__(mcs, name, bases,
cls_dict)
@six.add_metaclass(ControllerMetaclass)
class Controller(object):
"""Default controller."""
_view_builder_class = None
def __init__(self, view_builder=None):
"""Initialize controller with a view builder instance."""
if view_builder:
self._view_builder = view_builder
elif self._view_builder_class:
self._view_builder = self._view_builder_class()
else:
self._view_builder = None
def __getattribute__(self, key):
def version_select(*args, **kwargs):
"""Look for the method which matches the name supplied and version
constraints and calls it with the supplied arguments.
@return: Returns the result of the method called
@raises: VersionNotFoundForAPIMethod if there is no method which
matches the name and version constraints
"""
# The first arg to all versioned methods is always the request
# object. The version for the request is attached to the
# request object
if len(args) == 0:
ver = kwargs['req'].api_version_request
else:
ver = args[0].api_version_request
func_list = self.versioned_methods[key]
for func in func_list:
if ver.matches(func.start_version, func.end_version):
# Update the version_select wrapper function so
# other decorator attributes like wsgi.response
# are still respected.
functools.update_wrapper(version_select, func.func)
return func.func(self, *args, **kwargs)
# No version match
raise exception.VersionNotFoundForAPIMethod(version=ver)
try:
version_meth_dict = object.__getattribute__(self, VER_METHOD_ATTR)
except AttributeError:
# No versioning on this class
return object.__getattribute__(self, key)
if version_meth_dict and \
key in object.__getattribute__(self, VER_METHOD_ATTR):
return version_select
return object.__getattribute__(self, key)
# NOTE(cyeoh): This decorator MUST appear first (the outermost
# decorator) on an API method for it to work correctly
@classmethod
def api_version(cls, min_ver, max_ver=None):
"""Decorator for versioning api methods.
Add the decorator to any method which takes a request object
as the first parameter and belongs to a class which inherits from
wsgi.Controller.
@min_ver: string representing minimum version
@max_ver: optional string representing maximum version
"""
def decorator(f):
obj_min_ver = api_version.APIVersionRequest(min_ver)
if max_ver:
obj_max_ver = api_version.APIVersionRequest(max_ver)
else:
obj_max_ver = api_version.APIVersionRequest()
# Add to list of versioned methods registered
func_name = f.__name__
new_func = versioned_method.VersionedMethod(
func_name, obj_min_ver, obj_max_ver, f)
func_dict = getattr(cls, VER_METHOD_ATTR, {})
if not func_dict:
setattr(cls, VER_METHOD_ATTR, func_dict)
func_list = func_dict.get(func_name, [])
if not func_list:
func_dict[func_name] = func_list
func_list.append(new_func)
# Ensure the list is sorted by minimum version (reversed)
# so later when we work through the list in order we find
# the method which has the latest version which supports
# the version requested.
# TODO(cyeoh): Add check to ensure that there are no overlapping
            # ranges of valid versions as that is ambiguous
func_list.sort(key=lambda f: f.start_version, reverse=True)
return f
return decorator
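    # Illustrative sketch (versions and method name hypothetical): two
    # implementations of the same name can serve different microversion
    # ranges.
    #
    #     class ServersController(Controller):
    #         @Controller.api_version("2.1", "2.3")
    #         def show(self, req, id):
    #             ...  # serves requests with version <= 2.3
    #
    #         @Controller.api_version("2.4")  # noqa
    #         def show(self, req, id):
    #             ...  # serves 2.4 and later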
@staticmethod
def is_valid_body(body, entity_name):
if not (body and entity_name in body):
return False
def is_dict(d):
try:
d.get(None)
return True
except AttributeError:
return False
return is_dict(body[entity_name])
class Fault(webob.exc.HTTPException):
"""Wrap webob.exc.HTTPException to provide API friendly response."""
_fault_names = {
400: "badRequest",
401: "unauthorized",
403: "forbidden",
404: "itemNotFound",
405: "badMethod",
409: "conflictingRequest",
413: "overLimit",
415: "badMediaType",
429: "overLimit",
501: "notImplemented",
503: "serviceUnavailable"}
def __init__(self, exception):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
for key, value in self.wrapped_exc.headers.items():
self.wrapped_exc.headers[key] = str(value)
self.status_int = exception.status_int
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
user_locale = req.best_match_language()
# Replace the body with fault details.
code = self.wrapped_exc.status_int
fault_name = self._fault_names.get(code, "computeFault")
explanation = self.wrapped_exc.explanation
LOG.debug("Returning %(code)s to user: %(explanation)s",
{'code': code, 'explanation': explanation})
explanation = i18n.translate(explanation, user_locale)
fault_data = {
fault_name: {
'code': code,
'message': explanation}}
if code == 413 or code == 429:
retry = self.wrapped_exc.headers.get('Retry-After', None)
if retry:
fault_data[fault_name]['retryAfter'] = retry
if not req.api_version_request.is_null():
self.wrapped_exc.headers[API_VERSION_REQUEST_HEADER] = \
req.api_version_request.get_string()
self.wrapped_exc.headers['Vary'] = \
API_VERSION_REQUEST_HEADER
content_type = req.best_match_content_type()
serializer = {
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
def __str__(self):
return self.wrapped_exc.__str__()
class RateLimitFault(webob.exc.HTTPException):
"""Rate-limited request response."""
def __init__(self, message, details, retry_time):
"""Initialize new `RateLimitFault` with relevant information."""
hdrs = RateLimitFault._retry_after(retry_time)
self.wrapped_exc = webob.exc.HTTPTooManyRequests(headers=hdrs)
self.content = {
"overLimit": {
"code": self.wrapped_exc.status_int,
"message": message,
"details": details,
"retryAfter": hdrs['Retry-After'],
},
}
@staticmethod
def _retry_after(retry_time):
delay = int(math.ceil(retry_time - time.time()))
retry_after = delay if delay > 0 else 0
headers = {'Retry-After': '%d' % retry_after}
return headers
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""Return the wrapped exception with a serialized body conforming
to our error format.
"""
user_locale = request.best_match_language()
content_type = request.best_match_content_type()
self.content['overLimit']['message'] = \
i18n.translate(self.content['overLimit']['message'], user_locale)
self.content['overLimit']['details'] = \
i18n.translate(self.content['overLimit']['details'], user_locale)
serializer = {
'application/json': JSONDictSerializer(),
}[content_type]
content = serializer.serialize(self.content)
self.wrapped_exc.body = content
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
| sajeeshcs/nested_quota_latest | nova/api/openstack/wsgi.py | Python | apache-2.0 | 44,219 |
from .stopper import EarlyStopper
from .progbar import ProgressBar
from .utils import split_arr
from .data_iterator import SequentialIterator
from tensorflow.python.framework import ops
import tensorflow as tf
import logging
logging.basicConfig(format='%(module)s.%(funcName)s %(lineno)d:%(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def train(session, feed_dict, train_cost_sb, valid_cost_sb, optimizer, epoch_look_back=5,
max_epoch=100, percent_decrease=0, train_valid_ratio=[5,1], batchsize=64,
randomize_split=False):
"""
Example training object for training a dataset
"""
train_arrs = []
valid_arrs = []
phs = []
for ph, arr in feed_dict.items():
train_arr, valid_arr = split_arr(arr, train_valid_ratio, randomize=randomize_split)
phs.append(ph)
train_arrs.append(train_arr)
valid_arrs.append(valid_arr)
iter_train = SequentialIterator(*train_arrs, batchsize=batchsize)
iter_valid = SequentialIterator(*valid_arrs, batchsize=batchsize)
es = EarlyStopper(max_epoch, epoch_look_back, percent_decrease)
# required for BatchNormalization layer
update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
with ops.control_dependencies(update_ops):
train_op = optimizer.minimize(train_cost_sb)
init = tf.global_variables_initializer()
session.run(init)
epoch = 0
while True:
epoch += 1
##############################[ Training ]##############################
print('\n')
logger.info('<<<<<[ epoch: {} ]>>>>>'.format(epoch))
logger.info('..training')
pbar = ProgressBar(len(iter_train))
ttl_exp = 0
mean_train_cost = 0
for batches in iter_train:
fd = dict(zip(phs, batches))
train_cost, _ = session.run([train_cost_sb, train_op], feed_dict=fd)
mean_train_cost += train_cost * len(batches[0])
ttl_exp += len(batches[0])
pbar.update(ttl_exp)
print('')
mean_train_cost /= ttl_exp
logger.info('..average train cost: {}'.format(mean_train_cost))
##############################[ Validating ]############################
logger.info('..validating')
pbar = ProgressBar(len(iter_valid))
ttl_exp = 0
mean_valid_cost = 0
for batches in iter_valid:
fd = dict(zip(phs, batches))
valid_cost = session.run(valid_cost_sb, feed_dict=fd)
mean_valid_cost += valid_cost * len(batches[0])
ttl_exp += len(batches[0])
pbar.update(ttl_exp)
print('')
mean_valid_cost /= ttl_exp
logger.info('..average valid cost: {}'.format(mean_valid_cost))
if es.continue_learning(mean_valid_cost, epoch=epoch):
logger.info('best epoch last update: {}'.format(es.best_epoch_last_update))
logger.info('best valid last update: {}'.format(es.best_valid_last_update))
else:
logger.info('training done!')
break
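# A hedged usage sketch (model, shapes and optimizer are illustrative, not
# mandated by this module); it assumes the TensorFlow 1.x API used above.
if __name__ == '__main__':
    import numpy as np
    X_ph = tf.placeholder('float32', [None, 4])
    y_ph = tf.placeholder('float32', [None, 1])
    w = tf.Variable(tf.zeros([4, 1]))
    cost = tf.reduce_mean(tf.square(tf.matmul(X_ph, w) - y_ph))
    with tf.Session() as session:
        train(session,
              feed_dict={X_ph: np.random.rand(100, 4).astype('float32'),
                         y_ph: np.random.rand(100, 1).astype('float32')},
              train_cost_sb=cost, valid_cost_sb=cost,
              optimizer=tf.train.AdamOptimizer(0.01),
              max_epoch=2, batchsize=32)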
| hycis/TensorGraph | tensorgraph/trainobject.py | Python | apache-2.0 | 3,080 |
# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
from ryu.topology.event import EventSwitchEnter, EventSwitchReconnected
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self, *args, **kwargs):
super(SimpleSwitch13, self).__init__(*args, **kwargs)
self.mac_to_port = {}
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
# install table-miss flow entry
#
# We specify NO BUFFER to max_len of the output action due to
# OVS bug. At this moment, if we specify a lesser number, e.g.,
# 128, OVS will send Packet-In with invalid buffer_id and
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
# actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
# ofproto.OFPCML_NO_BUFFER)]
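        # NOTE: OFPActionOutput takes the output port as its first argument,
        # so the line below uses OFPCML_NO_BUFFER (0xffff) as the port value
        # rather than sending table-miss packets to the controller.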
actions = [parser.OFPActionOutput(ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
def add_flow(self, datapath, priority, match,
actions, buffer_id=None, table_id=0):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
priority=priority, match=match,
instructions=inst, table_id=table_id)
else:
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
match=match, instructions=inst, table_id=table_id)
datapath.send_msg(mod)
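    # Illustrative call (match fields hypothetical): install a priority-10
    # rule that forwards HTTP traffic out port 2.
    #
    #     match = parser.OFPMatch(eth_type=0x0800, ip_proto=6, tcp_dst=80)
    #     self.add_flow(datapath, 10, match, [parser.OFPActionOutput(2)])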
# new switch detected
@set_ev_cls([EventSwitchEnter, EventSwitchReconnected])
def _ev_switch_enter_handler(self, ev):
datapath = ev.switch.dp
        self.logger.info('registered OF switch id: %s', datapath.id)
        ofproto = datapath.ofproto
        self.logger.info('OF version: 0x%02x', ofproto.OFP_VERSION)
# send NORMAL action for all undefined flows
ofp_parser = datapath.ofproto_parser
actions = [ofp_parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
self.add_flow(datapath, 0, None, actions, table_id=0)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if ev.msg.msg_len < ev.msg.total_len:
self.logger.debug("packet truncated: only %s of %s bytes",
ev.msg.msg_len, ev.msg.total_len)
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
if eth.ethertype == ether_types.ETH_TYPE_LLDP:
# ignore lldp packet
return
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
# learn a mac address to avoid FLOOD next time.
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [parser.OFPActionOutput(out_port)]
# install a flow to avoid packet_in next time
if out_port != ofproto.OFPP_FLOOD:
match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
# verify if we have a valid buffer_id, if yes avoid to send both
# flow_mod & packet_out
if msg.buffer_id != ofproto.OFP_NO_BUFFER:
self.add_flow(datapath, 1, match, actions, msg.buffer_id)
return
else:
self.add_flow(datapath, 1, match, actions)
data = None
if msg.buffer_id == ofproto.OFP_NO_BUFFER:
data = msg.data
out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
in_port=in_port, actions=actions, data=data)
datapath.send_msg(out)
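# Hedged usage note: this app is normally launched with ryu-manager rather
# than run directly; the topology events handled above require link/topology
# discovery to be enabled, e.g.:
#
#     ryu-manager --observe-links son_emu_simple_switch_13.py
#
# The module path is illustrative and depends on the working directory.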
| mpeuster/son-emu | src/emuvim/dcemulator/son_emu_simple_switch_13.py | Python | apache-2.0 | 5,519 |
import kazoo.client
import kazoo.exceptions
class MockStruct(object):
"""
Basic object to allow adding arbitrary properties
"""
class MockKazooClient(object):
"""
In-memory version of the kazoo client for unit testing
"""
def __init__(self):
self.store = {'/': None}
self.ephemerals = set()
self.versions = {}
self._connected = False
def start(self):
self._connected = True
def stop(self):
        for eph in self.ephemerals:
            self.store.pop(eph)
        self.ephemerals.clear()
        self._connected = False
@property
def connected(self):
return self._connected
@property
def client_id(self):
return 123, 'password'
def restart(self):
self.stop()
self.start()
def close(self):
pass
def create(self, path, data, ephemeral=False, sequence=False,
makepath=False):
# TODO: sequence not implemented
if path in self.store or path == '/':
raise kazoo.exceptions.NodeExistsError
parts = path.split('/')
for i in xrange(1, len(parts)):
subpath = '/'.join(parts[:i])
if not subpath:
subpath = '/'
if subpath in self.store and subpath in self.ephemerals:
raise kazoo.exceptions.NoChildrenForEphemeralsError
if subpath not in self.store:
if makepath or subpath == '/':
self.store[subpath] = None
else:
raise kazoo.exceptions.NoNodeError
if ephemeral:
self.ephemerals.add(path)
self.store[path] = data
return path
def ensure_path(self, path):
if path in self.store or path == '/':
return
parts = path.split('/')
for i in xrange(1, len(parts)):
subpath = '/'.join(parts[:i])
if subpath not in self.store:
self.store[subpath] = None
self.store[path] = None
def exists(self, path):
return path if path in self.store else None
def get(self, path):
# TODO: manage versions internally better
if path not in self.store:
raise kazoo.exceptions.NoNodeError
get_stat = MockStruct()
get_stat.version = -1
        return self.store[path], get_stat
def get_children(self, path, include_data=False):
# TODO: include_data doesn't do the right thing
if path not in self.store:
raise kazoo.exceptions.NoNodeError
parts = path.split('/')
result = []
for existpath, data in self.store.iteritems():
if existpath.startswith(path):
existparts = existpath.split('/')
if len(existparts) == len(parts) + 1:
if include_data:
result.append((existpath, data))
else:
result.append(existpath)
return result
def set(self, path, data, version=-1):
if path not in self.store:
raise kazoo.exceptions.NoNodeError
if path in self.versions:
existversion = self.versions[path]
if version != -1 and existversion > version:
raise kazoo.exceptions.BadVersionError
self.store[path] = data
self.versions[path] = version
def delete(self, path, version=-1, recursive=False):
if path not in self.store:
raise kazoo.exceptions.NoNodeError
existversion = -1
if path in self.versions:
existversion = self.versions[path]
if version != -1 and existversion > version:
raise kazoo.exceptions.BadVersionError
to_pop = []
for existpath in self.store.iterkeys():
if existpath.startswith(path):
if path != existpath and not recursive:
raise kazoo.exceptions.NotEmptyError
to_pop.append(existpath)
for subpath in to_pop:
self.store.pop(subpath)
if subpath in self.versions:
self.versions.pop(subpath)
if subpath in self.ephemerals:
                self.ephemerals.remove(subpath)
if path == '/':
self.store = {'/': None}
def add_listener(self, unused):
pass
def remove_listener(self, unused):
pass
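# A short self-contained sketch (paths and payloads illustrative) exercising
# the mock the same way kazoo-based code would.
if __name__ == '__main__':
    zk = MockKazooClient()
    zk.start()
    zk.create('/app/config', 'data', makepath=True)
    assert zk.exists('/app/config') == '/app/config'
    data, _stat = zk.get('/app/config')
    print(data)                        # 'data'
    zk.create('/app/lock', '', ephemeral=True)
    zk.stop()                          # ephemeral nodes vanish on stop()
    assert zk.exists('/app/lock') is None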
| kanakb/pyhelix | tests/mockclient.py | Python | apache-2.0 | 4,413 |
# Copyright 2016 Hewlett Packard Enterprise Development, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from keystoneauth1 import loading as ks_loading
import netaddr
from neutron_lib.api.definitions import ip_allocation as ipalloc_apidef
from neutron_lib.api.definitions import l2_adjacency as l2adj_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib.api import validators
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib.exceptions import placement as placement_exc
from neutron_lib.plugins import directory
from novaclient import client as nova_client
from novaclient import exceptions as nova_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from neutron._i18n import _
from neutron.db import _resource_extend as resource_extend
from neutron.extensions import segment
from neutron.notifiers import batch_notifier
from neutron.objects import network as net_obj
from neutron.objects import subnet as subnet_obj
from neutron.services.segments import db
from neutron.services.segments import exceptions
from neutron.services.segments import placement_client
LOG = log.getLogger(__name__)
NOVA_API_VERSION = '2.41'
IPV4_RESOURCE_CLASS = 'IPV4_ADDRESS'
SEGMENT_NAME_STUB = 'Neutron segment id %s'
MAX_INVENTORY_UPDATE_RETRIES = 10
@resource_extend.has_resource_extenders
@registry.has_registry_receivers
class Plugin(db.SegmentDbMixin, segment.SegmentPluginBase):
_instance = None
supported_extension_aliases = ["segment", "ip_allocation",
l2adj_apidef.ALIAS,
"standard-attr-segment",
"subnet-segmentid-writable",
'segments-peer-subnet-host-routes']
__native_pagination_support = True
__native_sorting_support = True
__filter_validation_support = True
def __init__(self):
self.nova_updater = NovaSegmentNotifier()
self.segment_host_routes = SegmentHostRoutes()
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_network_dict_binding(network_res, network_db):
if not directory.get_plugin('segments'):
return
# TODO(carl_baldwin) Make this work with service subnets when
# it's a thing.
is_adjacent = (not network_db.subnets or
not network_db.subnets[0].segment_id)
network_res[l2adj_apidef.L2_ADJACENCY] = is_adjacent
@staticmethod
@resource_extend.extends([subnet_def.COLLECTION_NAME])
def _extend_subnet_dict_binding(subnet_res, subnet_db):
subnet_res['segment_id'] = subnet_db.get('segment_id')
@staticmethod
@resource_extend.extends([port_def.COLLECTION_NAME])
def _extend_port_dict_binding(port_res, port_db):
if not directory.get_plugin('segments'):
return
value = ipalloc_apidef.IP_ALLOCATION_IMMEDIATE
if port_db.get('ip_allocation'):
value = port_db.get('ip_allocation')
port_res[ipalloc_apidef.IP_ALLOCATION] = value
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
@registry.receives(resources.SEGMENT, [events.BEFORE_DELETE])
def _prevent_segment_delete_with_subnet_associated(
self, resource, event, trigger, context, segment,
for_net_delete=False):
"""Raise exception if there are any subnets associated with segment."""
if for_net_delete:
# don't check if this is a part of a network delete operation
return
segment_id = segment['id']
subnets = subnet_obj.Subnet.get_objects(context,
segment_id=segment_id)
subnet_ids = [s.id for s in subnets]
if subnet_ids:
reason = _("The segment is still associated with subnet(s) "
"%s") % ", ".join(subnet_ids)
raise exceptions.SegmentInUse(segment_id=segment_id,
reason=reason)
class Event(object):
def __init__(self, method, segment_ids, total=None, reserved=None,
segment_host_mappings=None, host=None):
self.method = method
if isinstance(segment_ids, set):
self.segment_ids = segment_ids
else:
self.segment_id = segment_ids
self.total = total
self.reserved = reserved
self.segment_host_mappings = segment_host_mappings
self.host = host
@registry.has_registry_receivers
class NovaSegmentNotifier(object):
def __init__(self):
self.p_client, self.n_client = self._get_clients()
self.batch_notifier = batch_notifier.BatchNotifier(
cfg.CONF.send_events_interval, self._send_notifications)
def _get_clients(self):
p_client = placement_client.PlacementAPIClient()
n_auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova')
n_session = ks_loading.load_session_from_conf_options(
cfg.CONF,
'nova',
auth=n_auth)
extensions = [
ext for ext in nova_client.discover_extensions(NOVA_API_VERSION)
if ext.name == "server_external_events"]
n_client = nova_client.Client(
NOVA_API_VERSION,
session=n_session,
region_name=cfg.CONF.nova.region_name,
endpoint_type=cfg.CONF.nova.endpoint_type,
extensions=extensions)
return p_client, n_client
def _send_notifications(self, batched_events):
for event in batched_events:
try:
event.method(event)
except placement_exc.PlacementEndpointNotFound:
LOG.debug('Placement API was not found when trying to '
'update routed networks IPv4 inventories')
return
def _notify_subnet(self, context, subnet, segment_id):
total, reserved = self._calculate_inventory_total_and_reserved(subnet)
if total:
segment_host_mappings = net_obj.SegmentHostMapping.get_objects(
context, segment_id=segment_id)
self.batch_notifier.queue_event(Event(
self._create_or_update_nova_inventory, segment_id, total=total,
reserved=reserved,
segment_host_mappings=segment_host_mappings))
@registry.receives(resources.SUBNET, [events.AFTER_CREATE])
def _notify_subnet_created(self, resource, event, trigger, context,
subnet, **kwargs):
segment_id = subnet.get('segment_id')
if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4:
return
self._notify_subnet(context, subnet, segment_id)
def _create_or_update_nova_inventory(self, event):
try:
self._update_nova_inventory(event)
except placement_exc.PlacementResourceProviderNotFound:
self._create_nova_inventory(event.segment_id, event.total,
event.reserved,
event.segment_host_mappings)
def _update_nova_inventory(self, event):
for count in range(MAX_INVENTORY_UPDATE_RETRIES):
ipv4_inventory = self.p_client.get_inventory(event.segment_id,
IPV4_RESOURCE_CLASS)
if event.total:
ipv4_inventory['total'] += event.total
if event.reserved:
ipv4_inventory['reserved'] += event.reserved
try:
self.p_client.update_inventory(event.segment_id,
ipv4_inventory,
IPV4_RESOURCE_CLASS)
return
except placement_exc.PlacementInventoryUpdateConflict:
LOG.debug('Re-trying to update Nova IPv4 inventory for '
'routed network segment: %s', event.segment_id)
LOG.error('Failed to update Nova IPv4 inventory for routed '
'network segment: %s', event.segment_id)
def _get_nova_aggregate_uuid(self, aggregate):
try:
return aggregate.uuid
except AttributeError:
with excutils.save_and_reraise_exception():
LOG.exception("uuid was not returned as part of the aggregate "
"object which indicates that the Nova API "
"backend does not support microversions. Ensure "
"that the compute endpoint in the service "
"catalog points to the v2.1 API.")
def _create_nova_inventory(self, segment_id, total, reserved,
segment_host_mappings):
name = SEGMENT_NAME_STUB % segment_id
resource_provider = {'name': name, 'uuid': segment_id}
self.p_client.create_resource_provider(resource_provider)
aggregate = self.n_client.aggregates.create(name, None)
aggregate_uuid = self._get_nova_aggregate_uuid(aggregate)
self.p_client.associate_aggregates(segment_id, [aggregate_uuid])
for mapping in segment_host_mappings:
self.n_client.aggregates.add_host(aggregate.id, mapping.host)
ipv4_inventory = {'total': total, 'reserved': reserved,
'min_unit': 1, 'max_unit': 1, 'step_size': 1,
'allocation_ratio': 1.0,
'resource_class': IPV4_RESOURCE_CLASS}
self.p_client.create_inventory(segment_id, ipv4_inventory)
def _calculate_inventory_total_and_reserved(self, subnet):
total = 0
reserved = 0
allocation_pools = subnet.get('allocation_pools') or []
for pool in allocation_pools:
total += int(netaddr.IPAddress(pool['end']) -
netaddr.IPAddress(pool['start'])) + 1
if total:
if subnet['gateway_ip']:
total += 1
reserved += 1
if subnet['enable_dhcp']:
reserved += 1
return total, reserved
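    # Worked example (hypothetical values, not from the source): a subnet
    # with a single allocation pool 10.0.0.2-10.0.0.254 yields total=253;
    # with gateway_ip set this becomes total=254 and reserved=1, and
    # enable_dhcp=True bumps reserved to 2 for the DHCP port.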
@registry.receives(resources.SUBNET, [events.AFTER_UPDATE])
def _notify_subnet_updated(self, resource, event, trigger, context,
subnet, original_subnet, **kwargs):
segment_id = subnet.get('segment_id')
original_segment_id = original_subnet.get('segment_id')
if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4:
return
if original_segment_id != segment_id:
# Migration to routed network, treat as create
self._notify_subnet(context, subnet, segment_id)
return
filters = {'segment_id': [segment_id],
'ip_version': [constants.IP_VERSION_4]}
if not subnet['allocation_pools']:
plugin = directory.get_plugin()
alloc_pools = [s['allocation_pools'] for s in
plugin.get_subnets(context, filters=filters)]
if not any(alloc_pools):
self.batch_notifier.queue_event(Event(
self._delete_nova_inventory, segment_id))
return
original_total, original_reserved = (
self._calculate_inventory_total_and_reserved(original_subnet))
updated_total, updated_reserved = (
self._calculate_inventory_total_and_reserved(subnet))
total = updated_total - original_total
reserved = updated_reserved - original_reserved
if total or reserved:
segment_host_mappings = None
if not original_subnet['allocation_pools']:
segment_host_mappings = net_obj.SegmentHostMapping.get_objects(
context, segment_id=segment_id)
self.batch_notifier.queue_event(Event(
self._create_or_update_nova_inventory, segment_id, total=total,
reserved=reserved,
segment_host_mappings=segment_host_mappings))
@registry.receives(resources.SUBNET, [events.AFTER_DELETE])
def _notify_subnet_deleted(self, resource, event, trigger, context,
subnet, **kwargs):
segment_id = subnet.get('segment_id')
if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4:
return
total, reserved = self._calculate_inventory_total_and_reserved(subnet)
if total:
            filters = {'segment_id': [segment_id],
                       'ip_version': [constants.IP_VERSION_4]}
plugin = directory.get_plugin()
if plugin.get_subnets_count(context, filters=filters) > 0:
self.batch_notifier.queue_event(Event(
self._update_nova_inventory, segment_id, total=-total,
reserved=-reserved))
else:
self.batch_notifier.queue_event(Event(
self._delete_nova_inventory, segment_id))
def _get_aggregate_id(self, segment_id):
aggregate_uuid = self.p_client.list_aggregates(
segment_id)['aggregates'][0]
aggregates = self.n_client.aggregates.list()
for aggregate in aggregates:
nc_aggregate_uuid = self._get_nova_aggregate_uuid(aggregate)
if nc_aggregate_uuid == aggregate_uuid:
return aggregate.id
def _delete_nova_inventory(self, event):
aggregate_id = self._get_aggregate_id(event.segment_id)
aggregate = self.n_client.aggregates.get_details(
aggregate_id)
for host in aggregate.hosts:
self.n_client.aggregates.remove_host(aggregate_id,
host)
self.n_client.aggregates.delete(aggregate_id)
self.p_client.delete_resource_provider(event.segment_id)
@registry.receives(resources.SEGMENT_HOST_MAPPING, [events.AFTER_CREATE])
def _notify_host_addition_to_aggregate(self, resource, event, trigger,
context, host, current_segment_ids,
**kwargs):
subnets = subnet_obj.Subnet.get_objects(context,
segment_id=current_segment_ids)
segment_ids = {s.segment_id for s in subnets}
self.batch_notifier.queue_event(Event(self._add_host_to_aggregate,
segment_ids, host=host))
def _add_host_to_aggregate(self, event):
for segment_id in event.segment_ids:
try:
aggregate_id = self._get_aggregate_id(segment_id)
except placement_exc.PlacementAggregateNotFound:
LOG.info('When adding host %(host)s, aggregate not found '
'for routed network segment %(segment_id)s',
{'host': event.host, 'segment_id': segment_id})
continue
try:
self.n_client.aggregates.add_host(aggregate_id, event.host)
except nova_exc.Conflict:
LOG.info('Host %(host)s already exists in aggregate for '
'routed network segment %(segment_id)s',
{'host': event.host, 'segment_id': segment_id})
@registry.receives(resources.PORT,
[events.AFTER_CREATE, events.AFTER_DELETE])
def _notify_port_created_or_deleted(self, resource, event, trigger,
context, port, **kwargs):
if not self._does_port_require_nova_inventory_update(port):
return
ipv4_subnets_number, segment_id = (
self._get_ipv4_subnets_number_and_segment_id(port, context))
if segment_id:
if event == events.AFTER_DELETE:
ipv4_subnets_number = -ipv4_subnets_number
self.batch_notifier.queue_event(Event(self._update_nova_inventory,
segment_id, reserved=ipv4_subnets_number))
@registry.receives(resources.PORT, [events.AFTER_UPDATE])
def _notify_port_updated(self, resource, event, trigger, context,
**kwargs):
port = kwargs.get('port')
original_port = kwargs.get('original_port')
does_original_port_require_nova_inventory_update = (
self._does_port_require_nova_inventory_update(original_port))
does_port_require_nova_inventory_update = (
self._does_port_require_nova_inventory_update(port))
if not (does_original_port_require_nova_inventory_update or
does_port_require_nova_inventory_update):
return
original_port_ipv4_subnets_number, segment_id = (
self._get_ipv4_subnets_number_and_segment_id(original_port,
context))
if not segment_id:
return
port_ipv4_subnets_number = len(self._get_ipv4_subnet_ids(port))
if not does_original_port_require_nova_inventory_update:
original_port_ipv4_subnets_number = 0
if not does_port_require_nova_inventory_update:
port_ipv4_subnets_number = 0
update = port_ipv4_subnets_number - original_port_ipv4_subnets_number
if update:
self.batch_notifier.queue_event(Event(self._update_nova_inventory,
segment_id, reserved=update))
def _get_ipv4_subnets_number_and_segment_id(self, port, context):
ipv4_subnet_ids = self._get_ipv4_subnet_ids(port)
if not ipv4_subnet_ids:
return 0, None
subnet = subnet_obj.Subnet.get_object(context, id=ipv4_subnet_ids[0])
if subnet and subnet.segment_id:
return len(ipv4_subnet_ids), subnet.segment_id
return 0, None
def _does_port_require_nova_inventory_update(self, port):
device_owner = port.get('device_owner')
if (device_owner.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX) or
device_owner == constants.DEVICE_OWNER_DHCP):
return False
return True
def _get_ipv4_subnet_ids(self, port):
ipv4_subnet_ids = []
for ip in port.get('fixed_ips', []):
if netaddr.IPAddress(
ip['ip_address']).version == constants.IP_VERSION_4:
ipv4_subnet_ids.append(ip['subnet_id'])
return ipv4_subnet_ids
@registry.has_registry_receivers
class SegmentHostRoutes(object):
def _get_subnets(self, context, network_id):
return subnet_obj.Subnet.get_objects(context, network_id=network_id)
def _calculate_routed_network_host_routes(self, context, ip_version,
network_id=None, subnet_id=None,
segment_id=None,
host_routes=None,
gateway_ip=None,
old_gateway_ip=None,
deleted_cidr=None):
"""Calculate host routes for routed network.
This method is used to calculate the host routes for routed networks
both when handling the user create or update request and when making
updates to subnets on the network in response to events: AFTER_CREATE
and AFTER_DELETE.
:param ip_version: IP version (4/6).
:param network_id: Network ID.
:param subnet_id: UUID of the subnet.
        :param segment_id: Segment ID associated with the subnet.
        :param host_routes: Current host_routes of the subnet.
        :param gateway_ip: The subnet's gateway IP address.
:param old_gateway_ip: The old gateway IP address of the subnet when it
is changed on update.
:param deleted_cidr: The cidr of a deleted subnet.
        :returns: Host routes with routes for the other subnets on the routed
                  network appended, unless a route to the destination already
                  exists.
"""
if host_routes is None:
host_routes = []
dest_ip_nets = [netaddr.IPNetwork(route['destination']) for
route in host_routes]
# Drop routes to the deleted cidr, when the subnet was deleted.
if deleted_cidr:
delete_route = {'destination': deleted_cidr, 'nexthop': gateway_ip}
if delete_route in host_routes:
host_routes.remove(delete_route)
for subnet in self._get_subnets(context, network_id):
if (subnet.id == subnet_id or subnet.segment_id == segment_id or
subnet.ip_version != ip_version):
continue
subnet_ip_net = netaddr.IPNetwork(subnet.cidr)
if old_gateway_ip:
old_route = {'destination': str(subnet.cidr),
'nexthop': old_gateway_ip}
if old_route in host_routes:
host_routes.remove(old_route)
dest_ip_nets.remove(subnet_ip_net)
if gateway_ip:
# Use netaddr here in case the user provided a summary route
                # (supernet route). E.g. subnet.cidr = 10.0.1.0/24 and
# the user provided a host route for 10.0.0.0/16. We don't
# need to append a route in this case.
if not any(subnet_ip_net in ip_net for ip_net in dest_ip_nets):
host_routes.append({'destination': subnet.cidr,
'nexthop': gateway_ip})
return host_routes
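    # Worked example (hypothetical values): on a routed network with subnet A
    # (10.0.1.0/24, gateway 10.0.1.1, segment seg-a) and subnet B
    # (10.0.2.0/24, segment seg-b), calculating routes for A appends
    # {'destination': '10.0.2.0/24', 'nexthop': '10.0.1.1'}, unless an
    # existing host route (possibly a supernet such as 10.0.0.0/16) already
    # covers B's cidr.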
def _host_routes_need_update(self, host_routes, calc_host_routes):
"""Compare host routes and calculated host routes
:param host_routes: Current host routes
:param calc_host_routes: Host routes + calculated host routes for
routed network
        :returns: True if host_routes and calc_host_routes are not equal
"""
return ((set((route['destination'],
route['nexthop']) for route in host_routes) !=
set((route['destination'],
route['nexthop']) for route in calc_host_routes)))
def _update_routed_network_host_routes(self, context, network_id,
deleted_cidr=None):
"""Update host routes on subnets on a routed network after event
Host routes on the subnets on a routed network may need updates after
any CREATE or DELETE event.
:param network_id: Network ID
:param deleted_cidr: The cidr of a deleted subnet.
"""
for subnet in self._get_subnets(context, network_id):
host_routes = [{'destination': str(route.destination),
'nexthop': route.nexthop}
for route in subnet.host_routes]
calc_host_routes = self._calculate_routed_network_host_routes(
context=context,
ip_version=subnet.ip_version,
network_id=subnet.network_id,
subnet_id=subnet.id,
segment_id=subnet.segment_id,
host_routes=copy.deepcopy(host_routes),
gateway_ip=subnet.gateway_ip,
deleted_cidr=deleted_cidr)
if self._host_routes_need_update(host_routes, calc_host_routes):
LOG.debug(
"Updating host routes for subnet %s on routed network %s",
                    subnet.id, subnet.network_id)
plugin = directory.get_plugin()
plugin.update_subnet(context, subnet.id,
{'subnet': {
'host_routes': calc_host_routes}})
@registry.receives(resources.SUBNET, [events.BEFORE_CREATE])
def host_routes_before_create(self, resource, event, trigger, context,
subnet, **kwargs):
segment_id = subnet.get('segment_id')
gateway_ip = subnet.get('gateway_ip')
if validators.is_attr_set(subnet.get('host_routes')):
host_routes = subnet.get('host_routes')
else:
host_routes = []
if segment_id is not None and validators.is_attr_set(gateway_ip):
calc_host_routes = self._calculate_routed_network_host_routes(
context=context,
ip_version=netaddr.IPNetwork(subnet['cidr']).version,
network_id=subnet['network_id'],
segment_id=subnet['segment_id'],
host_routes=copy.deepcopy(host_routes),
gateway_ip=gateway_ip)
if (not host_routes or
self._host_routes_need_update(host_routes,
calc_host_routes)):
subnet['host_routes'] = calc_host_routes
@registry.receives(resources.SUBNET, [events.BEFORE_UPDATE])
def host_routes_before_update(self, resource, event, trigger, **kwargs):
context = kwargs['context']
subnet, original_subnet = kwargs['request'], kwargs['original_subnet']
segment_id = subnet.get('segment_id', original_subnet['segment_id'])
gateway_ip = subnet.get('gateway_ip', original_subnet['gateway_ip'])
host_routes = subnet.get('host_routes', original_subnet['host_routes'])
if (segment_id and (host_routes != original_subnet['host_routes'] or
gateway_ip != original_subnet['gateway_ip'])):
calc_host_routes = self._calculate_routed_network_host_routes(
context=context,
ip_version=netaddr.IPNetwork(original_subnet['cidr']).version,
network_id=original_subnet['network_id'],
segment_id=segment_id,
host_routes=copy.deepcopy(host_routes),
gateway_ip=gateway_ip,
old_gateway_ip=original_subnet['gateway_ip'] if (
gateway_ip != original_subnet['gateway_ip']) else None)
if self._host_routes_need_update(host_routes, calc_host_routes):
subnet['host_routes'] = calc_host_routes
@registry.receives(resources.SUBNET, [events.AFTER_CREATE])
def host_routes_after_create(self, resource, event, trigger, **kwargs):
context = kwargs['context']
subnet = kwargs['subnet']
# If there are other subnets on the network and subnet has segment_id
# ensure host routes for all subnets are updated.
if (len(self._get_subnets(context, subnet['network_id'])) > 1 and
subnet.get('segment_id')):
self._update_routed_network_host_routes(context,
subnet['network_id'])
@registry.receives(resources.SUBNET, [events.AFTER_DELETE])
def host_routes_after_delete(self, resource, event, trigger, context,
subnet, **kwargs):
# If this is a routed network, remove any routes to this subnet on
        # this network's remaining subnets.
if subnet.get('segment_id'):
self._update_routed_network_host_routes(
context, subnet['network_id'], deleted_cidr=subnet['cidr'])
| huntxu/neutron | neutron/services/segments/plugin.py | Python | apache-2.0 | 28,272 |
import time
import sqlite3
from base_model import BaseModel
from datetime import datetime
from contextlib import contextmanager
class SSIDTrafficHistory(BaseModel):
def __init__(self, dbfile, table_name, time_limit):
super(SSIDTrafficHistory, self).__init__(dbfile, table_name)
self.time_limit = time_limit
def init_db(self):
with self.db_cursor() as c:
c.execute('''
CREATE TABLE IF NOT EXISTS {} (
timestamp integer,
adapter text,
ssid text,
rx integer,
tx integer,
PRIMARY KEY (timestamp, adapter, ssid)
)
'''.format(self.table_name))
    def truncate_time(self, timestamp):
raise NotImplementedError
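    # A subclass is expected to supply the bucketing rule. A minimal sketch
    # (hypothetical subclass, not part of this module) truncating timestamps
    # to the start of the hour:
    #
    #   class HourlySSIDTrafficHistory(SSIDTrafficHistory):
    #       def truncate_time(self, timestamp):
    #           return int(timestamp) - (int(timestamp) % 3600)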
def query(self, adapter, ssid, timestamp=None):
if not timestamp:
timestamp = time.time()
with self.db_cursor(commit=False) as c:
query = '''
SELECT timestamp, adapter, ssid, rx, tx
FROM {}
WHERE adapter=? AND ssid=? AND timestamp=?;
'''.format(self.table_name)
c.execute(query, (adapter, ssid, self.truncate_time(timestamp)))
result = c.fetchone()
        if result is None:
result = (self.truncate_time(timestamp), adapter, ssid, 0, 0)
return {
'timestamp': self.truncate_time(timestamp),
'adapter': adapter,
'ssid': ssid,
'rx': result[3],
'tx': result[4]
}
def query_all(self, start_time=None, end_time=None, timestamp=None):
if not timestamp:
timestamp = time.time()
if not end_time:
end_time = timestamp
if not start_time:
start_time = self.truncate_time(end_time)
with self.db_cursor(commit=False) as c:
query = '''
SELECT timestamp, adapter, ssid, sum(rx), sum(tx)
FROM {}
WHERE timestamp >= ? AND timestamp <= ?
GROUP BY adapter, ssid
ORDER BY adapter, ssid;
'''.format(self.table_name)
c.execute(query, (start_time, end_time))
results = c.fetchall()
query_result = {}
for r in results:
ts, adapter, ssid, rx, tx = r
if adapter not in query_result:
query_result[adapter] = []
query_result[adapter].append({
'timestamp': ts,
'adapter': adapter,
'ssid': ssid,
'rx': rx,
'tx': tx
})
return query_result
def update(self, adapter, ssid, rx, tx, timestamp=None):
if not timestamp:
timestamp = time.time()
with self.db_cursor() as c:
query = '''
INSERT OR REPLACE INTO {} (timestamp, adapter, ssid, rx, tx)
VALUES ( ?, ?, ?, ?, ? );
'''.format(self.table_name)
c.execute(query, (self.truncate_time(timestamp), adapter, ssid, rx, tx))
def add(self, adapter, ssid, delta_rx, delta_tx, timestamp=None):
if not timestamp:
timestamp = time.time()
prev = self.query(adapter, ssid, timestamp=timestamp)
self.update(
adapter, ssid,
prev['rx']+delta_rx, prev['tx']+delta_tx,
timestamp=timestamp
)
self.clear(timestamp=timestamp)
def clear(self, timestamp=None):
if not timestamp:
timestamp = time.time()
with self.db_cursor() as c:
query = '''
DELETE FROM {}
WHERE timestamp < ?;
'''.format(self.table_name)
c.execute(query, (timestamp - self.time_limit, ))
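    # Illustrative accounting flow, assuming the hypothetical hourly subclass
    # sketched under truncate_time() above:
    #
    #   history = HourlySSIDTrafficHistory('/tmp/ssidstat.db', 'traffic',
    #                                      time_limit=7 * 24 * 3600)
    #   history.init_db()
    #   history.add('wlan0', 'HomeAP', delta_rx=1024, delta_tx=256)
    #   row = history.query('wlan0', 'HomeAP')  # rx == 1024, tx == 256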
| putrasattvika/ssidstat | ssidstat/common/models/ssid_traffic_history.py | Python | apache-2.0 | 3,044 |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Haiku recurrent core."""
import abc
import types
from typing import Any, NamedTuple, Optional, Sequence, Tuple, Union
from haiku._src import base
from haiku._src import basic
from haiku._src import conv
from haiku._src import initializers
from haiku._src import module
from haiku._src import stateful
import jax
import jax.nn
import jax.numpy as jnp
# If you are forking replace this with `import haiku as hk`.
hk = types.ModuleType("haiku")
hk.initializers = initializers
hk.Linear = basic.Linear
hk.ConvND = conv.ConvND
hk.get_parameter = base.get_parameter
hk.Module = module.Module
hk.scan = stateful.scan
inside_transform = base.inside_transform
del base, basic, conv, initializers, module
class RNNCore(hk.Module):
"""Base class for RNN cores.
This class defines the basic functionality that every core should
implement: :meth:`initial_state`, used to construct an example of the
core state; and :meth:`__call__` which applies the core parameterized
by a previous state to an input.
Cores may be used with :func:`dynamic_unroll` and :func:`static_unroll` to
iteratively construct an output sequence from the given input sequence.
"""
@abc.abstractmethod
def __call__(self, inputs, prev_state) -> Tuple[Any, Any]:
"""Run one step of the RNN.
Args:
inputs: An arbitrarily nested structure.
prev_state: Previous core state.
Returns:
A tuple with two elements ``output, next_state``. ``output`` is an
arbitrarily nested structure. ``next_state`` is the next core state, this
must be the same shape as ``prev_state``.
"""
@abc.abstractmethod
def initial_state(self, batch_size: Optional[int]):
"""Constructs an initial state for this core.
Args:
batch_size: Optional int or an integral scalar tensor representing
batch size. If None, the core may either fail or (experimentally)
return an initial state without a batch dimension.
Returns:
Arbitrarily nested initial state for this core.
"""
def static_unroll(core, input_sequence, initial_state, time_major=True):
"""Performs a static unroll of an RNN.
An *unroll* corresponds to calling the core on each element of the
input sequence in a loop, carrying the state through::
state = initial_state
for t in range(len(input_sequence)):
outputs, state = core(input_sequence[t], state)
A *static* unroll replaces a loop with its body repeated multiple
times when executed inside :func:`jax.jit`::
state = initial_state
outputs0, state = core(input_sequence[0], state)
outputs1, state = core(input_sequence[1], state)
outputs2, state = core(input_sequence[2], state)
...
See :func:`dynamic_unroll` for a loop-preserving unroll function.
Args:
core: An :class:`RNNCore` to unroll.
input_sequence: An arbitrarily nested structure of tensors of shape
``[T, ...]`` if time-major=True, or ``[B, T, ...]`` if time_major=False,
where ``T`` is the number of time steps.
initial_state: An initial state of the given core.
time_major: If True, inputs are expected time-major, otherwise they are
expected batch-major.
Returns:
A tuple with two elements:
* **output_sequence** - An arbitrarily nested structure of tensors
of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
* **final_state** - Core state at time step ``T``.
"""
output_sequence = []
time_axis = 0 if time_major else 1
num_steps = jax.tree_leaves(input_sequence)[0].shape[time_axis]
state = initial_state
for t in range(num_steps):
if time_major:
inputs = jax.tree_map(lambda x, _t=t: x[_t], input_sequence)
else:
inputs = jax.tree_map(lambda x, _t=t: x[:, _t], input_sequence)
outputs, state = core(inputs, state)
output_sequence.append(outputs)
# Stack outputs along the time axis.
output_sequence = jax.tree_multimap(
lambda *args: jnp.stack(args, axis=time_axis),
*output_sequence)
return output_sequence, state
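# Illustrative usage (assumed shapes), typically inside a transformed
# function:
#
#   core = LSTM(hidden_size=16)
#   initial_state = core.initial_state(batch_size=8)
#   x = jnp.zeros([20, 8, 32])  # [T, B, F], time-major
#   outputs, final_state = static_unroll(core, x, initial_state)
#   # outputs.shape == (20, 8, 16)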
def _swap_batch_time(inputs):
"""Swaps batch and time axes, assumed to be the first two axes."""
return jax.tree_map(lambda x: jnp.swapaxes(x, 0, 1), inputs)
def dynamic_unroll(core,
input_sequence,
initial_state,
time_major=True,
reverse=False,
return_all_states=False):
"""Performs a dynamic unroll of an RNN.
An *unroll* corresponds to calling the core on each element of the
input sequence in a loop, carrying the state through::
state = initial_state
for t in range(len(input_sequence)):
outputs, state = core(input_sequence[t], state)
A *dynamic* unroll preserves the loop structure when executed inside
:func:`jax.jit`. See :func:`static_unroll` for an unroll function which
replaces a loop with its body repeated multiple times.
Args:
core: An :class:`RNNCore` to unroll.
input_sequence: An arbitrarily nested structure of tensors of shape
``[T, ...]`` if time-major=True, or ``[B, T, ...]`` if time_major=False,
where ``T`` is the number of time steps.
initial_state: An initial state of the given core.
time_major: If True, inputs are expected time-major, otherwise they are
expected batch-major.
reverse: If True, inputs are scanned in the reversed order. Equivalent to
reversing the time dimension in both inputs and outputs. See
https://jax.readthedocs.io/en/latest/_autosummary/jax.lax.scan.html for
more details.
return_all_states: If True, all intermediate states are returned rather than
only the last one in time.
Returns:
A tuple with two elements:
* **output_sequence** - An arbitrarily nested structure of tensors
of shape ``[T, ...]`` if time-major, otherwise ``[B, T, ...]``.
* **state_sequence** - If return_all_states is True, returns the sequence
of core states. Otherwise, core state at time step ``T``.
"""
scan = hk.scan if inside_transform() else jax.lax.scan
# Swap the input and output of core.
def scan_f(prev_state, inputs):
outputs, next_state = core(inputs, prev_state)
if return_all_states:
return next_state, (outputs, next_state)
return next_state, outputs
# TODO(hamzamerzic): Remove axis swapping once scan supports time axis arg.
if not time_major:
input_sequence = _swap_batch_time(input_sequence)
scan_result = scan(
scan_f, initial_state, input_sequence, reverse=reverse)
if return_all_states:
_, (output_sequence, state_sequence) = scan_result
else:
last_state, output_sequence = scan_result
if not time_major:
output_sequence = _swap_batch_time(output_sequence)
if return_all_states:
state_sequence = _swap_batch_time(state_sequence)
if return_all_states:
return output_sequence, state_sequence
return output_sequence, last_state
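# Same call signature as static_unroll, but the loop survives jax.jit as a
# scan. Illustrative usage (assumed shapes):
#
#   outputs, final_state = dynamic_unroll(core, x, initial_state)
#   outputs, all_states = dynamic_unroll(core, x, initial_state,
#                                        return_all_states=True)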
def add_batch(nest, batch_size: Optional[int]):
"""Adds a batch dimension at axis 0 to the leaves of a nested structure."""
broadcast = lambda x: jnp.broadcast_to(x, (batch_size,) + x.shape)
return jax.tree_map(broadcast, nest)
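# E.g. add_batch(jnp.zeros([4]), 2) returns an array of shape (2, 4); for a
# nest, every leaf gains the leading batch axis.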
class VanillaRNN(RNNCore):
r"""Basic fully-connected RNN core.
Given :math:`x_t` and the previous hidden state :math:`h_{t-1}` the
core computes
.. math::
h_t = \operatorname{ReLU}(w_i x_t + b_i + w_h h_{t-1} + b_h)
The output is equal to the new state, :math:`h_t`.
"""
def __init__(
self,
hidden_size: int,
double_bias: bool = True,
name: Optional[str] = None
):
"""Constructs a vanilla RNN core.
Args:
hidden_size: Hidden layer size.
double_bias: Whether to use a bias in the two linear layers. This changes
nothing to the learning performance of the cell. However, doubling will
create two sets of bias parameters rather than one.
name: Name of the module.
"""
super().__init__(name=name)
self.hidden_size = hidden_size
self.double_bias = double_bias
def __call__(self, inputs, prev_state):
input_to_hidden = hk.Linear(self.hidden_size)
# TODO(b/173771088): Consider changing default to double_bias=False.
hidden_to_hidden = hk.Linear(self.hidden_size, with_bias=self.double_bias)
out = jax.nn.relu(input_to_hidden(inputs) + hidden_to_hidden(prev_state))
return out, out
def initial_state(self, batch_size: Optional[int]):
state = jnp.zeros([self.hidden_size])
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class LSTMState(NamedTuple):
"""An LSTM core state consists of hidden and cell vectors.
Attributes:
hidden: Hidden state.
cell: Cell state.
"""
hidden: jnp.ndarray
cell: jnp.ndarray
class LSTM(RNNCore):
r"""Long short-term memory (LSTM) RNN core.
The implementation is based on :cite:`zaremba2014recurrent`. Given
:math:`x_t` and the previous state :math:`(h_{t-1}, c_{t-1})` the core
computes
.. math::
\begin{array}{ll}
i_t = \sigma(W_{ii} x_t + W_{hi} h_{t-1} + b_i) \\
f_t = \sigma(W_{if} x_t + W_{hf} h_{t-1} + b_f) \\
g_t = \tanh(W_{ig} x_t + W_{hg} h_{t-1} + b_g) \\
o_t = \sigma(W_{io} x_t + W_{ho} h_{t-1} + b_o) \\
c_t = f_t c_{t-1} + i_t g_t \\
h_t = o_t \tanh(c_t)
\end{array}
where :math:`i_t`, :math:`f_t`, :math:`o_t` are input, forget and
output gate activations, and :math:`g_t` is a vector of cell updates.
The output is equal to the new hidden, :math:`h_t`.
Notes:
Forget gate initialization:
Following :cite:`jozefowicz2015empirical` we add 1.0 to :math:`b_f`
after initialization in order to reduce the scale of forgetting in
the beginning of the training.
"""
def __init__(self, hidden_size: int, name: Optional[str] = None):
"""Constructs an LSTM.
Args:
hidden_size: Hidden layer size.
name: Name of the module.
"""
super().__init__(name=name)
self.hidden_size = hidden_size
def __call__(
self,
inputs: jnp.ndarray,
prev_state: LSTMState,
) -> Tuple[jnp.ndarray, LSTMState]:
if len(inputs.shape) > 2 or not inputs.shape:
raise ValueError("LSTM input must be rank-1 or rank-2.")
x_and_h = jnp.concatenate([inputs, prev_state.hidden], axis=-1)
gated = hk.Linear(4 * self.hidden_size)(x_and_h)
# TODO(slebedev): Consider aligning the order of gates with Sonnet.
# i = input, g = cell_gate, f = forget_gate, o = output_gate
i, g, f, o = jnp.split(gated, indices_or_sections=4, axis=-1)
f = jax.nn.sigmoid(f + 1) # Forget bias, as in sonnet.
c = f * prev_state.cell + jax.nn.sigmoid(i) * jnp.tanh(g)
h = jax.nn.sigmoid(o) * jnp.tanh(c)
return h, LSTMState(h, c)
def initial_state(self, batch_size: Optional[int]) -> LSTMState:
state = LSTMState(hidden=jnp.zeros([self.hidden_size]),
cell=jnp.zeros([self.hidden_size]))
if batch_size is not None:
state = add_batch(state, batch_size)
return state
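# Minimal usage sketch (assumed sizes; user code would `import haiku as hk`):
#
#   def forward(x, prev_state):
#       return LSTM(hidden_size=16)(x, prev_state)
#
#   init, apply = hk.transform(forward)
#   # For x of shape [B, F] and prev_state from initial_state(B), one step
#   # returns (h, LSTMState(h, c)) with h of shape [B, 16].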
class ConvNDLSTM(RNNCore):
r"""``num_spatial_dims``-D convolutional LSTM.
The implementation is based on :cite:`xingjian2015convolutional`.
Given :math:`x_t` and the previous state :math:`(h_{t-1}, c_{t-1})`
the core computes
.. math::
\begin{array}{ll}
i_t = \sigma(W_{ii} * x_t + W_{hi} * h_{t-1} + b_i) \\
f_t = \sigma(W_{if} * x_t + W_{hf} * h_{t-1} + b_f) \\
g_t = \tanh(W_{ig} * x_t + W_{hg} * h_{t-1} + b_g) \\
o_t = \sigma(W_{io} * x_t + W_{ho} * h_{t-1} + b_o) \\
c_t = f_t c_{t-1} + i_t g_t \\
h_t = o_t \tanh(c_t)
\end{array}
where :math:`*` denotes the convolution operator; :math:`i_t`,
:math:`f_t`, :math:`o_t` are input, forget and output gate activations,
and :math:`g_t` is a vector of cell updates.
The output is equal to the new hidden state, :math:`h_t`.
Notes:
Forget gate initialization:
Following :cite:`jozefowicz2015empirical` we add 1.0 to :math:`b_f`
after initialization in order to reduce the scale of forgetting in
the beginning of the training.
"""
def __init__(
self,
num_spatial_dims: int,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a convolutional LSTM.
Args:
num_spatial_dims: Number of spatial dimensions of the input.
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length ``num_spatial_dims``),
or an int. ``kernel_shape`` will be expanded to define a kernel size in
all dimensions.
name: Name of the module.
"""
super().__init__(name=name)
self.num_spatial_dims = num_spatial_dims
self.input_shape = tuple(input_shape)
self.output_channels = output_channels
self.kernel_shape = kernel_shape
def __call__(
self,
inputs,
state: LSTMState,
) -> Tuple[jnp.ndarray, LSTMState]:
input_to_hidden = hk.ConvND(
num_spatial_dims=self.num_spatial_dims,
output_channels=4 * self.output_channels,
kernel_shape=self.kernel_shape,
name="input_to_hidden")
hidden_to_hidden = hk.ConvND(
num_spatial_dims=self.num_spatial_dims,
output_channels=4 * self.output_channels,
kernel_shape=self.kernel_shape,
name="hidden_to_hidden")
gates = input_to_hidden(inputs) + hidden_to_hidden(state.hidden)
i, g, f, o = jnp.split(gates, indices_or_sections=4, axis=-1)
f = jax.nn.sigmoid(f + 1)
c = f * state.cell + jax.nn.sigmoid(i) * jnp.tanh(g)
h = jax.nn.sigmoid(o) * jnp.tanh(c)
return h, LSTMState(h, c)
def initial_state(self, batch_size: Optional[int]) -> LSTMState:
shape = self.input_shape + (self.output_channels,)
state = LSTMState(jnp.zeros(shape), jnp.zeros(shape))
if batch_size is not None:
state = add_batch(state, batch_size)
return state
class Conv1DLSTM(ConvNDLSTM): # pylint: disable=empty-docstring
__doc__ = ConvNDLSTM.__doc__.replace("``num_spatial_dims``", "1")
def __init__(
self,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a 1-D convolutional LSTM.
Args:
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length 1), or an int.
``kernel_shape`` will be expanded to define a kernel size in all
dimensions.
name: Name of the module.
"""
super().__init__(
num_spatial_dims=1,
input_shape=input_shape,
output_channels=output_channels,
kernel_shape=kernel_shape,
name=name)
class Conv2DLSTM(ConvNDLSTM): # pylint: disable=empty-docstring
__doc__ = ConvNDLSTM.__doc__.replace("``num_spatial_dims``", "2")
def __init__(
self,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a 2-D convolutional LSTM.
Args:
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length 2), or an int.
``kernel_shape`` will be expanded to define a kernel size in all
dimensions.
name: Name of the module.
"""
super().__init__(
num_spatial_dims=2,
input_shape=input_shape,
output_channels=output_channels,
kernel_shape=kernel_shape,
name=name)
class Conv3DLSTM(ConvNDLSTM): # pylint: disable=empty-docstring
__doc__ = ConvNDLSTM.__doc__.replace("``num_spatial_dims``", "3")
def __init__(
self,
input_shape: Sequence[int],
output_channels: int,
kernel_shape: Union[int, Sequence[int]],
name: Optional[str] = None,
):
"""Constructs a 3-D convolutional LSTM.
Args:
input_shape: Shape of the inputs excluding batch size.
output_channels: Number of output channels.
kernel_shape: Sequence of kernel sizes (of length 3), or an int.
``kernel_shape`` will be expanded to define a kernel size in all
dimensions.
name: Name of the module.
"""
super().__init__(
num_spatial_dims=3,
input_shape=input_shape,
output_channels=output_channels,
kernel_shape=kernel_shape,
name=name)
class GRU(RNNCore):
r"""Gated Recurrent Unit.
The implementation is based on: https://arxiv.org/pdf/1412.3555v1.pdf with
biases.
Given :math:`x_t` and the previous state :math:`h_{t-1}` the core computes
.. math::
\begin{array}{ll}
z_t &= \sigma(W_{iz} x_t + W_{hz} h_{t-1} + b_z) \\
r_t &= \sigma(W_{ir} x_t + W_{hr} h_{t-1} + b_r) \\
a_t &= \tanh(W_{ia} x_t + W_{ha} (r_t \bigodot h_{t-1}) + b_a) \\
h_t &= (1 - z_t) \bigodot h_{t-1} + z_t \bigodot a_t
\end{array}
  where :math:`z_t` and :math:`r_t` are the update and reset gates,
  respectively.
The output is equal to the new hidden state, :math:`h_t`.
"""
def __init__(
self,
hidden_size: int,
w_i_init: Optional[hk.initializers.Initializer] = None,
w_h_init: Optional[hk.initializers.Initializer] = None,
b_init: Optional[hk.initializers.Initializer] = None,
name: Optional[str] = None,
):
super().__init__(name=name)
self.hidden_size = hidden_size
self.w_i_init = w_i_init or hk.initializers.VarianceScaling()
self.w_h_init = w_h_init or hk.initializers.VarianceScaling()
self.b_init = b_init or jnp.zeros
def __call__(self, inputs, state):
if inputs.ndim not in (1, 2):
raise ValueError("GRU input must be rank-1 or rank-2.")
input_size = inputs.shape[-1]
hidden_size = self.hidden_size
w_i = hk.get_parameter("w_i", [input_size, 3 * hidden_size], inputs.dtype,
init=self.w_i_init)
w_h = hk.get_parameter("w_h", [hidden_size, 3 * hidden_size], inputs.dtype,
init=self.w_h_init)
b = hk.get_parameter("b", [3 * hidden_size], inputs.dtype, init=self.b_init)
w_h_z, w_h_a = jnp.split(w_h, indices_or_sections=[2 * hidden_size], axis=1)
b_z, b_a = jnp.split(b, indices_or_sections=[2 * hidden_size], axis=0)
gates_x = jnp.matmul(inputs, w_i)
zr_x, a_x = jnp.split(
gates_x, indices_or_sections=[2 * hidden_size], axis=-1)
zr_h = jnp.matmul(state, w_h_z)
zr = zr_x + zr_h + jnp.broadcast_to(b_z, zr_h.shape)
z, r = jnp.split(jax.nn.sigmoid(zr), indices_or_sections=2, axis=-1)
a_h = jnp.matmul(r * state, w_h_a)
a = jnp.tanh(a_x + a_h + jnp.broadcast_to(b_a, a_h.shape))
next_state = (1 - z) * state + z * a
return next_state, next_state
def initial_state(self, batch_size: Optional[int]):
state = jnp.zeros([self.hidden_size])
if batch_size is not None:
state = add_batch(state, batch_size)
return state
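# Illustrative single step (assumed sizes):
#
#   gru = GRU(hidden_size=8)
#   state = gru.initial_state(batch_size=4)     # shape (4, 8)
#   out, state = gru(jnp.zeros([4, 3]), state)  # out.shape == (4, 8)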
class IdentityCore(RNNCore):
"""A recurrent core that forwards the inputs and an empty state.
This is commonly used when switching between recurrent and feedforward
versions of a model while preserving the same interface.
"""
def __call__(self, inputs, state):
return inputs, state
def initial_state(self, batch_size: Optional[int]):
return ()
def _validate_and_conform(should_reset, state):
"""Ensures that should_reset is compatible with state."""
if should_reset.shape == state.shape[:should_reset.ndim]:
broadcast_shape = should_reset.shape + (1,)*(state.ndim - should_reset.ndim)
return jnp.reshape(should_reset, broadcast_shape)
raise ValueError(
"should_reset signal shape {} is not compatible with "
"state shape {}".format(should_reset.shape, state.shape))
class ResetCore(RNNCore):
"""A wrapper for managing state resets during unrolls.
When unrolling an :class:`RNNCore` on a batch of inputs sequences it may be
necessary to reset the core's state at different timesteps for different
elements of the batch. The :class:`ResetCore` class enables this by taking a
batch of ``should_reset`` booleans in addition to the batch of inputs, and
conditionally resetting the core's state for individual elements of the batch.
You may also reset individual entries of the state by passing a
``should_reset`` nest compatible with the state structure.
"""
def __init__(self, core: RNNCore, name: Optional[str] = None):
super().__init__(name=name)
self.core = core
def __call__(self, inputs, state):
"""Run one step of the wrapped core, handling state reset.
Args:
inputs: Tuple with two elements, ``inputs, should_reset``, where
``should_reset`` is the signal used to reset the wrapped core's state.
``should_reset`` can be either tensor or nest. If nest, ``should_reset``
must match the state structure, and its components' shapes must be
          prefixes of the corresponding tensors' shapes in the state nest.
          If tensor, supported shapes are all common shape prefixes of the state
component tensors, e.g. ``[batch_size]``.
state: Previous wrapped core state.
Returns:
Tuple of the wrapped core's ``output, next_state``.
"""
inputs, should_reset = inputs
if jax.treedef_is_leaf(jax.tree_structure(should_reset)):
# Equivalent to not tree.is_nested, but with support for Jax extensible
# pytrees.
should_reset = jax.tree_map(lambda _: should_reset, state)
# We now need to manually pad 'on the right' to ensure broadcasting operates
# correctly.
# Automatic broadcasting would in fact implicitly pad 'on the left',
# resulting in the signal to trigger resets for parts of the state
# across batch entries. For example:
#
# import jax
# import jax.numpy as jnp
#
# shape = (2, 2, 2)
# x = jnp.zeros(shape)
# y = jnp.ones(shape)
# should_reset = jnp.array([False, True])
# v = jnp.where(should_reset, x, y)
# for batch_entry in range(shape[0]):
# print("batch_entry {}:\n".format(batch_entry), v[batch_entry])
#
# >> batch_entry 0:
# >> [[1. 0.]
# >> [1. 0.]]
# >> batch_entry 1:
# >> [[1. 0.]
# >> [1. 0.]]
#
# Note how manually padding the should_reset tensor yields the desired
# behavior.
#
# import jax
# import jax.numpy as jnp
#
# shape = (2, 2, 2)
# x = jnp.zeros(shape)
# y = jnp.ones(shape)
# should_reset = jnp.array([False, True])
# dims_to_add = x.ndim - should_reset.ndim
# should_reset = should_reset.reshape(should_reset.shape + (1,)*dims_to_add)
# v = jnp.where(should_reset, x, y)
# for batch_entry in range(shape[0]):
# print("batch_entry {}:\n".format(batch_entry), v[batch_entry])
#
# >> batch_entry 0:
# >> [[1. 1.]
# >> [1. 1.]]
# >> batch_entry 1:
# >> [[0. 0.]
# >> [0. 0.]]
should_reset = jax.tree_multimap(_validate_and_conform, should_reset, state)
if self._is_batched(state):
batch_size = jax.tree_leaves(inputs)[0].shape[0]
else:
batch_size = None
initial_state = jax.tree_multimap(
lambda s, i: i.astype(s.dtype), state, self.initial_state(batch_size))
state = jax.tree_multimap(jnp.where, should_reset, initial_state, state)
return self.core(inputs, state)
def initial_state(self, batch_size: Optional[int]):
return self.core.initial_state(batch_size)
def _is_batched(self, state):
state = jax.tree_leaves(state)
if not state: # Empty state is treated as unbatched.
return False
batched = jax.tree_leaves(self.initial_state(batch_size=1))
return all(b.shape[1:] == s.shape[1:] for b, s in zip(batched, state))
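# Illustrative usage (assumed shapes): the wrapped core consumes
# (inputs, should_reset) tuples and re-initializes state per batch entry:
#
#   core = ResetCore(LSTM(hidden_size=4))
#   state = core.initial_state(batch_size=2)
#   should_reset = jnp.array([False, True])
#   out, state = core((jnp.zeros([2, 3]), should_reset), state)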
class _DeepRNN(RNNCore):
"""Underlying implementation of DeepRNN with skip connections."""
def __init__(
self,
layers: Sequence[Any],
skip_connections: bool,
name: Optional[str] = None
):
super().__init__(name=name)
self.layers = layers
self.skip_connections = skip_connections
if skip_connections:
for layer in layers:
if not isinstance(layer, RNNCore):
raise ValueError("skip_connections requires for all layers to be "
"`hk.RNNCore`s. Layers is: {}".format(layers))
def __call__(self, inputs, state):
current_inputs = inputs
next_states = []
outputs = []
state_idx = 0
concat = lambda *args: jnp.concatenate(args, axis=-1)
for idx, layer in enumerate(self.layers):
if self.skip_connections and idx > 0:
current_inputs = jax.tree_multimap(concat, inputs, current_inputs)
if isinstance(layer, RNNCore):
current_inputs, next_state = layer(current_inputs, state[state_idx])
outputs.append(current_inputs)
next_states.append(next_state)
state_idx += 1
else:
current_inputs = layer(current_inputs)
if self.skip_connections:
out = jax.tree_multimap(concat, *outputs)
else:
out = current_inputs
return out, tuple(next_states)
def initial_state(self, batch_size: Optional[int]):
return tuple(
layer.initial_state(batch_size)
for layer in self.layers
if isinstance(layer, RNNCore))
class DeepRNN(_DeepRNN):
r"""Wraps a sequence of cores and callables as a single core.
>>> deep_rnn = hk.DeepRNN([
... hk.LSTM(hidden_size=4),
... jax.nn.relu,
... hk.LSTM(hidden_size=2),
... ])
The state of a :class:`DeepRNN` is a tuple with one element per
:class:`RNNCore`. If no layers are :class:`RNNCore`\ s, the state is an empty
tuple.
"""
def __init__(self, layers: Sequence[Any], name: Optional[str] = None):
super().__init__(layers, skip_connections=False, name=name)
def deep_rnn_with_skip_connections(layers: Sequence[RNNCore],
name: Optional[str] = None) -> RNNCore:
r"""Constructs a :class:`DeepRNN` with skip connections.
Skip connections alter the dependency structure within a :class:`DeepRNN`.
Specifically, input to the i-th layer (i > 0) is given by a
concatenation of the core's inputs and the outputs of the (i-1)-th layer.
The output of the :class:`DeepRNN` is the concatenation of the outputs of all
cores.
.. code-block:: python
outputs0, ... = layers[0](inputs, ...)
      outputs1, ... = layers[1](jnp.concatenate([inputs, outputs0], axis=-1), ...)
      outputs2, ... = layers[2](jnp.concatenate([inputs, outputs1], axis=-1), ...)
...
Args:
layers: List of :class:`RNNCore`\ s.
name: Name of the module.
Returns:
A :class:`_DeepRNN` with skip connections.
Raises:
ValueError: If any of the layers is not an :class:`RNNCore`.
"""
return _DeepRNN(layers, skip_connections=True, name=name)
| deepmind/dm-haiku | haiku/_src/recurrent.py | Python | apache-2.0 | 27,760 |
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
"""check_i18n - compare texts in the source with the language files
Searches in the MoinMoin sources for calls of _() and tries to extract
the parameter. Then it checks whether those parameters are present in
the language modules' dictionaries.
Usage: check_i18n.py [lang ...]
Without arguments, checks all languages in i18n; with arguments, checks
only the specified languages. Look into MoinMoin.i18n.__init__ for
available language names.
The script will run from the moin root directory, where the MoinMoin
package lives, or from MoinMoin/i18n where this script lives.
TextFinder class based on code by Seo Sanghyeon and the python compiler
package.
TODO: fix it for the changed i18n stuff of moin 1.6
@copyright: 2003 Florian Festi, Nir Soffer, Thomas Waldmann
@license: GNU GPL, see COPYING for details.
"""
output_encoding = 'utf-8'
# These lead to crashes (MemoryError - due to missing codecs?)
#blacklist_files = ["ja.py", "zh.py", "zh_tw.py"]
#blacklist_langs = ["ja", "zh", "zh-tw"]
# If you have cjkcodecs installed, use this:
blacklist_files = []
blacklist_langs = []
import sys, os, compiler
from compiler.ast import Name, Const, CallFunc, Getattr
class TextFinder:
""" Walk through AST tree and collect text from gettext calls
Find all calls to gettext function in the source tree and collect
the texts in a dict. Use compiler to create an abstract syntax tree
from each source file, then find the nodes for gettext function
call, and get the text from the call.
    Localized texts are usually translated at runtime by gettext
    functions and appear in the source as _('text...'). The TextFinder
    class finds calls to the '_' function in any namespace, or to your
    preferred gettext function.
Note that TextFinder will only retrieve text from function calls
with a constant argument like _('text'). Calls like _('text' % locals()),
    _('text 1' + 'text 2') are marked as bad calls in the report, and the
text is not retrieved into the dictionary.
Note also that texts in source can appear several times in the same
    file or in different files, but they will only appear once in the
dictionary that this tool creates.
The dictionary value for each text is a dictionary of filenames each
    containing a list of (best-guess) line numbers containing the text.
"""
def __init__(self, name='_'):
""" Init with the gettext function name or '_'"""
self._name = name # getText function name
self._dictionary = {} # Unique texts in the found texts
self._found = 0 # All good calls including duplicates
self._bad = 0 # Bad calls: _('%s' % var) or _('a' + 'b')
def setFilename(self, filename):
"""Remember the filename we are parsing"""
self._filename = filename
def visitModule(self, node):
""" Start the search from the top node of a module
This is the entry point into the search. When compiler.walk is
called it calls this method with the module node.
This is the place to initialize module specific data.
"""
self._visited = {} # init node cache - we will visit each node once
self._lineno = 'NA' # init line number
# Start walking in the module node
self.walk(node)
def walk(self, node):
""" Walk through all nodes """
if node in self._visited:
# We visited this node already
return
self._visited[node] = 1
if not self.parseNode(node):
for child in node.getChildNodes():
self.walk(child)
def parseNode(self, node):
""" Parse function call nodes and collect text """
# Get the current line number. Since not all nodes have a line number
# we save the last line number - it should be close to the gettext call
if node.lineno is not None:
self._lineno = node.lineno
if node.__class__ == CallFunc and node.args:
child = node.node
klass = child.__class__
if (# Standard call _('text')
(klass == Name and child.name == self._name) or
# A call to an object attribute: object._('text')
(klass == Getattr and child.attrname == self._name)):
if node.args[0].__class__ == Const:
# Good call with a constant _('text')
self.addText(node.args[0].value)
else:
self.addBadCall(node)
return 1
return 0
def addText(self, text):
""" Add text to dictionary and count found texts.
Note that number of texts in dictionary could be different from
the number of texts found, because some texts appear several
times in the code.
Each text value is a dictionary of filenames that contain the
text and each filename value is the list of line numbers with
the text. Missing line numbers are recorded as 'NA'.
self._lineno is the last line number we checked. It may be the line
number of the text, or near it.
"""
self._found = self._found + 1
# Create key for this text if needed
if text not in self._dictionary:
self._dictionary[text] = {}
# Create key for this filename if needed
textInfo = self._dictionary[text]
if self._filename not in textInfo:
textInfo[self._filename] = [self._lineno]
else:
textInfo[self._filename].append(self._lineno)
def addBadCall(self, node):
"""Called when a bad call like _('a' + 'b') is found"""
self._bad = self._bad + 1
print
print "<!> Warning: non-constant _ call:"
print " `%s`" % str(node)
print " `%s`:%s" % (self._filename, self._lineno)
# Accessors
def dictionary(self):
return self._dictionary
def bad(self):
return self._bad
def found(self):
return self._found
def visit(path, visitor):
visitor.setFilename(path)
tree = compiler.parseFile(path)
compiler.walk(tree, visitor)
# MoinMoin specific stuff follows
class Report:
"""Language status report"""
def __init__(self, lang, sourceDict):
self.__lang = lang
self.__sourceDict = sourceDict
self.__langDict = None
self.__missing = {}
self.__unused = {}
self.__error = None
self.__ready = 0
self.create()
def loadLanguage(self):
filename = i18n.filename(self.__lang)
self.__langDict = pysupport.importName("MoinMoin.i18n." + filename, "text")
def create(self):
"""Compare language text dict against source dict"""
self.loadLanguage()
if not self.__langDict:
self.__error = "Language %s not found!" % self.__lang
self.__ready = 1
return
# Collect missing texts
for text in self.__sourceDict:
if text not in self.__langDict:
self.__missing[text] = self.__sourceDict[text]
# Collect unused texts
for text in self.__langDict:
if text not in self.__sourceDict:
self.__unused[text] = self.__langDict[text]
self.__ready = 1
def summary(self):
"""Return summary dict"""
summary = {
'name': i18n.languages[self.__lang][i18n.ENAME].encode(output_encoding),
'maintainer': i18n.languages[self.__lang][i18n.MAINTAINER],
'total': len(self.__langDict),
'missing': len(self.__missing),
'unused': len(self.__unused),
'error': self.__error
}
return summary
def missing(self):
return self.__missing
def unused(self):
return self.__unused
if __name__ == '__main__':
import time
# Check that we run from the root directory where MoinMoin package lives
# or from the i18n directory when this script lives
if os.path.exists('MoinMoin/__init__.py'):
# Running from the root directory
MoinMoin_dir = os.curdir
elif os.path.exists(os.path.join(os.pardir, 'i18n')):
        # Running from i18n
MoinMoin_dir = os.path.join(os.pardir, os.pardir)
else:
print __doc__
sys.exit(1)
# Insert MoinMoin_dir into sys.path
sys.path.insert(0, MoinMoin_dir)
from MoinMoin import i18n
from MoinMoin.util import pysupport
textFinder = TextFinder()
found = 0
unique = 0
bad = 0
# Find gettext calls in the source
for root, dirs, files in os.walk(os.path.join(MoinMoin_dir, 'MoinMoin')):
for name in files:
if name.endswith('.py'):
if name in blacklist_files: continue
path = os.path.join(root, name)
#print '%(path)s:' % locals(),
visit(path, textFinder)
# Report each file's results
new_unique = len(textFinder.dictionary()) - unique
new_found = textFinder.found() - found
#print '%(new_unique)d (of %(new_found)d)' % locals()
# Warn about bad calls - these should be fixed!
new_bad = textFinder.bad() - bad
#if new_bad:
# print '### Warning: %(new_bad)d bad call(s)' % locals()
unique = unique + new_unique
bad = bad + new_bad
found = found + new_found
# Print report using wiki markup, so we can publish this on MoinDev
# !!! Todo:
# save executive summary for the wiki
# save separate report for each language to be sent to the
# language translator.
# Update the wiki using XML-RPC??
print "This page is generated by `MoinMoin/i18n/check_i18n.py`."
print "To recreate this report run `make check-i18n` and paste here"
print
print '----'
print
print '<<TableOfContents(2)>>'
print
print
print "= Translation Report ="
print
print "== Summary =="
print
print 'Created on %s' % time.asctime()
print
print ('\n%(unique)d unique texts in dictionary of %(found)d texts '
'in source.') % locals()
if bad:
print '\n%(bad)d bad calls.' % locals()
print
# Check languages from the command line or from moin.i18n against
# the source
if sys.argv[1:]:
languages = sys.argv[1:]
else:
languages = i18n.languages.keys()
for lang in blacklist_langs:
# problems, maybe due to encoding?
if lang in languages:
languages.remove(lang)
if 'en' in languages:
languages.remove('en') # there is no en lang file
languages.sort()
# Create report for all languages
report = {}
for lang in languages:
report[lang] = Report(lang, textFinder.dictionary())
# Print summary for all languages
print ("||<:>'''Language'''||<:>'''Texts'''||<:>'''Missing'''"
"||<:>'''Unused'''||")
for lang in languages:
print ("||%(name)s||<)>%(total)s||<)>%(missing)s||<)>%(unused)s||"
) % report[lang].summary()
# Print details
for lang in languages:
dict = report[lang].summary()
print
print "== %(name)s ==" % dict
print
print "Maintainer: <<MailTo(%(maintainer)s)>>" % dict
# Print missing texts, if any
if report[lang].missing():
print """
=== Missing texts ===
These items should ''definitely'' get fixed.
Maybe the corresponding english text in the source code was only changed
slightly, then you want to look for a similar text in the ''unused''
section below and modify i18n, so that it will match again.
"""
for text in report[lang].missing():
print " 1. `%r`" % text
# Print unused texts, if any
if report[lang].unused():
print """
=== Possibly unused texts ===
Be ''very careful'' and double-check before removing any of these
potentially unused items.
This program can't detect references done from wiki pages, from
userprefs options, from Icon titles etc.!
"""
for text in report[lang].unused():
print " 1. `%r`" % text
| RealTimeWeb/wikisite | MoinMoin/i18n/tools/check_i18n.py | Python | apache-2.0 | 12,365 |
# Copyright 2019, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import pkg_resources
import typing
from typing import cast, Any, Callable, Optional, Sequence, Union
import warnings
from google.auth.credentials import AnonymousCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber import futures
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
from google.pubsub_v1.services.subscriber import client as subscriber_client
if typing.TYPE_CHECKING: # pragma: NO COVER
from google.cloud.pubsub_v1 import subscriber
from google.pubsub_v1.services.subscriber.transports.grpc import (
SubscriberGrpcTransport,
)
try:
__version__ = pkg_resources.get_distribution("google-cloud-pubsub").version
except pkg_resources.DistributionNotFound:
# Distribution might not be available if we are not running from within
# a PIP package.
__version__ = "0.0"
class Client(subscriber_client.SubscriberClient):
"""A subscriber client for Google Cloud Pub/Sub.
This creates an object that is capable of subscribing to messages.
Generally, you can instantiate this client with no arguments, and you
get sensible defaults.
Args:
kwargs: Any additional arguments provided are sent as keyword
            arguments to the underlying
:class:`~google.cloud.pubsub_v1.gapic.subscriber_client.SubscriberClient`.
Generally you should not need to set additional keyword
arguments. Optionally, regional endpoints can be set via
``client_options`` that takes a single key-value pair that
defines the endpoint.
Example:
.. code-block:: python
from google.cloud import pubsub_v1
subscriber_client = pubsub_v1.SubscriberClient(
# Optional
client_options = {
"api_endpoint": REGIONAL_ENDPOINT
}
)
"""
def __init__(self, **kwargs: Any):
# Sanity check: Is our goal to use the emulator?
# If so, create a grpc insecure channel with the emulator host
# as the target.
if os.environ.get("PUBSUB_EMULATOR_HOST"):
kwargs["client_options"] = {
"api_endpoint": os.environ.get("PUBSUB_EMULATOR_HOST")
}
kwargs["credentials"] = AnonymousCredentials()
# Instantiate the underlying GAPIC client.
super().__init__(**kwargs)
self._target = self._transport._host
self._closed = False
@classmethod
def from_service_account_file( # type: ignore[override]
cls, filename: str, **kwargs: Any
) -> "Client":
"""Creates an instance of this client using the provided credentials
file.
Args:
filename: The path to the service account private key json file.
kwargs: Additional arguments to pass to the constructor.
Returns:
A Subscriber :class:`~google.cloud.pubsub_v1.subscriber.client.Client`
instance that is the constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(**kwargs)
from_service_account_json = from_service_account_file # type: ignore[assignment]
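    # Illustrative call (the key file path is a placeholder):
    #
    #   client = Client.from_service_account_file("/path/to/key.json")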
@property
def target(self) -> str:
"""Return the target (where the API is).
Returns:
The location of the API.
"""
return self._target
@property
def closed(self) -> bool:
"""Return whether the client has been closed and cannot be used anymore.
.. versionadded:: 2.8.0
"""
return self._closed
@property
def api(self):
"""The underlying gapic API client.
.. versionchanged:: 2.10.0
Instead of a GAPIC ``SubscriberClient`` client instance, this property is a
proxy object to it with the same interface.
.. deprecated:: 2.10.0
Use the GAPIC methods and properties on the client instance directly
instead of through the :attr:`api` attribute.
"""
msg = (
'The "api" property only exists for backward compatibility, access its '
            'attributes directly through the client instance (e.g. "client.foo" '
'instead of "client.api.foo").'
)
warnings.warn(msg, category=DeprecationWarning)
return super()
def subscribe(
self,
subscription: str,
callback: Callable[["subscriber.message.Message"], Any],
flow_control: Union[types.FlowControl, Sequence] = (),
scheduler: Optional["subscriber.scheduler.ThreadScheduler"] = None,
use_legacy_flow_control: bool = False,
await_callbacks_on_shutdown: bool = False,
) -> futures.StreamingPullFuture:
"""Asynchronously start receiving messages on a given subscription.
This method starts a background thread to begin pulling messages from
a Pub/Sub subscription and scheduling them to be processed using the
provided ``callback``.
The ``callback`` will be called with an individual
:class:`google.cloud.pubsub_v1.subscriber.message.Message`. It is the
responsibility of the callback to either call ``ack()`` or ``nack()``
        on the message when it has finished processing. If an exception occurs
        in the callback during processing, the exception is logged and the
        message is ``nack()``-ed.
        The ``flow_control`` argument can be used to control the rate at
        which messages are pulled. The settings are relatively conservative by
        default to prevent "message hoarding" - a situation where the client
        pulls a large number of messages but cannot process them fast enough,
        leading it to "starve" other clients of messages. Increasing these
        settings may lead to faster throughput for messages that do not take
        a long time to process.
        The ``use_legacy_flow_control`` argument disables enforcing flow control
        settings at the Cloud Pub/Sub server, so that only client-side flow
        control is enforced.
This method starts the receiver in the background and returns a
*Future* representing its execution. Waiting on the future (calling
``result()``) will block forever or until a non-recoverable error
is encountered (such as loss of network connectivity). Cancelling the
        future will signal the process to shut down gracefully and exit.
        .. note:: This uses Pub/Sub's *streaming pull* feature. This feature
            has properties that may be surprising. Please take a look at
https://cloud.google.com/pubsub/docs/pull#streamingpull for
more details on how streaming pull behaves compared to the
synchronous pull method.
Example:
.. code-block:: python
from google.cloud import pubsub_v1
subscriber_client = pubsub_v1.SubscriberClient()
# existing subscription
subscription = subscriber_client.subscription_path(
'my-project-id', 'my-subscription')
def callback(message):
print(message)
message.ack()
future = subscriber_client.subscribe(
subscription, callback)
try:
future.result()
except KeyboardInterrupt:
future.cancel() # Trigger the shutdown.
future.result() # Block until the shutdown is complete.
Args:
subscription:
The name of the subscription. The subscription should have already been
created (for example, by using :meth:`create_subscription`).
callback:
The callback function. This function receives the message as
its only argument and will be called from a different thread/
process depending on the scheduling strategy.
flow_control:
The flow control settings. Use this to prevent situations where you are
inundated with too many messages at once.
scheduler:
An optional *scheduler* to use when executing the callback. This
controls how callbacks are executed concurrently. This object must not
be shared across multiple ``SubscriberClient`` instances.
use_legacy_flow_control (bool):
If set to ``True``, flow control at the Cloud Pub/Sub server is disabled,
though client-side flow control is still enabled. If set to ``False``
(default), both server-side and client-side flow control are enabled.
await_callbacks_on_shutdown:
If ``True``, after canceling the returned future, the latter's
``result()`` method will block until the background stream and its
helper threads have been terminated, and all currently executing message
callbacks are done processing.
If ``False`` (default), the returned future's ``result()`` method will
not block after canceling the future. The method will instead return
immediately after the background stream and its helper threads have been
terminated, but some of the message callback threads might still be
running at that point.
Returns:
A future instance that can be used to manage the background stream.
"""
flow_control = types.FlowControl(*flow_control)
manager = streaming_pull_manager.StreamingPullManager(
self,
subscription,
flow_control=flow_control,
scheduler=scheduler,
use_legacy_flow_control=use_legacy_flow_control,
await_callbacks_on_shutdown=await_callbacks_on_shutdown,
)
future = futures.StreamingPullFuture(manager)
manager.open(callback=callback, on_callback_error=future.set_exception)
return future
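    # A hedged sketch (not part of the original file) of supplying the
    # ``flow_control`` argument documented above; the limits are illustrative:
    #
    #   flow_control = types.FlowControl(
    #       max_messages=500, max_bytes=100 * 1024 * 1024)
    #   future = client.subscribe(subscription, callback, flow_control=flow_control)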
def close(self) -> None:
"""Close the underlying channel to release socket resources.
After a channel has been closed, the client instance cannot be used
anymore.
This method is idempotent.
"""
transport = cast("SubscriberGrpcTransport", self._transport)
transport.grpc_channel.close()
self._closed = True
def __enter__(self) -> "Client":
if self._closed:
raise RuntimeError("Closed subscriber cannot be used as context manager.")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
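# Illustrative use of the context-manager protocol implemented above
# (the project and subscription names are placeholders):
#
#   with Client() as client:
#       path = client.subscription_path("my-project", "my-subscription")
#       response = client.pull(subscription=path, max_messages=10)
#   # leaving the block calls close(), releasing the gRPC channel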
| googleapis/python-pubsub | google/cloud/pubsub_v1/subscriber/client.py | Python | apache-2.0 | 11,505 |
#!/usr/bin/python
## Wrapper For transverse synapse detector workflow##
## This wrapper exists to facilitate workflow level parallelization inside the LONI pipeline until
## it is properly added to the tool. It is important for this step to do workflow level parallelization
## because of the order of processing.
##
## Make sure that you specify the environment variable MATLAB_EXE_LOCATION inside the LONI module. This can be
## set under advanced options on the 'Execution' tab in the module set up.
############################################################################################
## (c) 2012 The Johns Hopkins University / Applied Physics Laboratory. All Rights Reserved.
## Proprietary Until Publicly Released
############################################################################################
from sys import argv
import os
# read in command line args
params = list(argv)
subjDirectory = params[1]
outputDirectory = params[2]
# Find File Names
os.chdir(subjDirectory)
for filename in os.listdir("."):
    if filename.endswith(".b"):
        print "#B# " + os.path.abspath(filename) + " #B#"
    elif filename.endswith(".grad"):
        print "#GRAD# " + os.path.abspath(filename) + " #GRAD#"
    elif filename.endswith("DTI.nii"):
        print "#DTI# " + os.path.abspath(filename) + " #DTI#"
    elif filename.endswith("MPRAGE.nii"):
        print "#MPRAGE# " + os.path.abspath(filename) + " #MPRAGE#"
# Make Output Directory
basename=os.path.basename
subDir=os.path.join(outputDirectory,basename(subjDirectory))
print "#@@@# " + subDir + " #@@@#"
#os.makedirs(subDir)
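# A guarded variant of the directory creation commented out above
# (a sketch; whether creation is wanted here is an assumption):
#
#   if not os.path.isdir(subDir):
#       os.makedirs(subDir)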
| gkiar/MR-devtools | tools/processSubjects.py | Python | apache-2.0 | 1,580 |
from ajenti.api import *
from ajenti.plugins import *
info = PluginInfo(
title='BIND9',
description='BIND9 DNS server',
icon='globe',
dependencies=[
PluginDependency('main'),
PluginDependency('services'),
BinaryDependency('named'),
],
)
def init():
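    # Deferring this import until the plugin is activated is the usual
    # Ajenti plugin pattern; importing at module load time could trigger
    # circular imports (an interpretation, not stated in the file).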
import main
| lupyuen/RaspberryPiImage | usr/share/pyshared/ajenti/plugins/bind9/__init__.py | Python | apache-2.0 | 313 |
# -*- coding: utf-8 -*-
__author__ = 'itconsense@gmail.com'
from collections import OrderedDict
from math import pi
from Products.Five import BrowserView
from plone import api
import base64
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import six
LOG = logging.getLogger('evaluate')
class UpgradeIt(BrowserView):
def __call__(self):
portal_setup = api.portal.get_tool(name='portal_setup')
portal_setup.runImportStepFromProfile(
'profile-plonetheme.sunburst:default', 'cssregistry', run_dependencies=False)
portal_skins = api.portal.get_tool(name='portal_skins')
custom = portal_skins['custom']
for oid in ['main_template', 'base_properties', 'ploneCustom.css']:
if oid in custom:
api.content.delete(obj=custom[oid])
return "DONE"
class Result(object):
def __init__(self):
self.good = ''
self.details = {}
class EvaluateTestView(BrowserView):
no_text = 'Kein Textbaustein'
factors = {
'Meistens': 5,
'Manchmal': 3,
'Selten': 1,
'Nie': 0
}
pie_factors = {
'Meistens': 3,
'Manchmal': 2,
'Selten': 1,
'Nie': 0
}
chart_img = ''
def get_detail_elements(self):
zope_script = self.context.restrictedTraverse('text_detail_elements')
return zope_script()
def get_summary_elements(self):
zope_script = self.context.restrictedTraverse('text_summary_elements')
return zope_script()
def text_blocks(self):
result = OrderedDict()
form = self.request.form
summary = 0
df = OrderedDict()
elements = self.get_detail_elements()
for i, group in enumerate(elements.keys()):
if group not in form:
continue
group_title = self.context[group].Title()
result[group_title] = Result()
good_values = []
for key, val in form[group].items():
summary += self.factors[val]
element = elements[group].get(key, self.no_text)
title = element.get('Titel', group_title)
if val == 'Meistens':
good_values.append(title)
continue
text = element.get(val)
if not text:
continue
if val in element:
result[group_title].details[title] = text
else:
result[group_title].details[title] = element.get('default')
                u_group_title = six.ensure_text(group_title, 'utf-8')  # accepts bytes or text
if u_group_title not in df:
df[u_group_title] = 0
df[u_group_title] += self.pie_factors[val]
if good_values:
result[group_title].good = ', '.join(good_values)
if not result[group_title].details:
LOG.warn('Details of group {0} are empty!'.format(group))
summary_elements = self.get_summary_elements()
if summary < 75:
result['summary'] = summary_elements['bad']
        elif 75 <= summary < 130:
result['summary'] = summary_elements['med']
else:
result['summary'] = summary_elements['good']
self.chart_img = 'data:image/jpeg;base64, ' + self.get_radar_chart(df)
self.legend = df.keys()
return result
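    # Scoring sketch (illustrative arithmetic, not from the original file):
    # with the ``factors`` table above, a sheet of 20 questions answered
    # 'Manchmal' throughout scores 20 * 3 = 60, landing in the band below 75,
    # while 20 answers of 'Meistens' score 100, landing in the middle band.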
def get_radar_chart(self, df):
LOG.info('{0}'.format(df))
# number of variable
categories = list(df)
N = len(categories)
# We are going to plot the first line of the data frame.
# But we need to repeat the first value to close the circular graph:
        values = list(df.values())
        values.append(values[0])
# What will be the angle of each axis in the plot? (we divide the plot / number of variable)
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]
# Initialise the spider plot
fig = plt.figure()
ax = plt.subplot(111, polar=True)
        # Draw one axis per variable and add tick labels
plt.xticks(angles[:-1], range(1, N+1), color='grey', size=8, rotation='vertical')
# Draw ylabels
ax.set_rlabel_position(0)
plt.yticks([])
plt.ylim(0, min(21, max(values)) + 1)
# Plot data
ax.plot(angles, values, linewidth=1, linestyle='solid')
# Fill area
ax.fill(angles, values, 'b', alpha=0.1)
img = six.BytesIO()
fig.savefig(img, format='png')
img.seek(0)
return base64.b64encode(img.read())
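    # Minimal sketch of exercising the chart helper outside of Plone
    # (the empty context/request and the data are illustrative only):
    #
    #   view = EvaluateTestView(None, None)
    #   img_b64 = view.get_radar_chart(OrderedDict([(u'A', 3), (u'B', 5)]))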
| tomgross/pp-site | src/itc.pptheme/itc/pptheme/browser/view.py | Python | apache-2.0 | 4,725 |