gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Tara O'Kelly- G00322214.
# Third Year, Data Representation and Querying, Software Development, GMIT.
import couchdb, json, time
from flask import Flask, render_template, request
import flask as fl
app = fl.Flask(__name__)
# Set up server object, representing a CouchDB server and connect to databases
# Adapted from https://pythonhosted.org/CouchDB/
couch = couchdb.Server('http://127.0.0.1:5984/')
db = couch['users_tf']    # user-credential documents ({'username', 'password'})
db1 = couch['posts_tf']   # post documents
# representing the user details of the current user ("" when logged out).
# NOTE(review): a single module-level global means only one logged-in user
# per server process — confirm this is acceptable for the deployment.
cur_doc = ""
# Routing: @app.route is a decorator used to match URLs to view functions in Flask apps.
# Each routing function checks for current user details
# and then renders the index.html template passing in the necessary values (using Jinja2, a template engine).
@app.route("/")
def root():
    """Home page: the feed when logged in, the login screen otherwise."""
    if not cur_doc:
        return render_template("index.html", loggedOut="loggedOut")
    return render_template("index.html", home="home", cur_doc=cur_doc,
                           posts=getPosts())
@app.route("/Error/<reason>")
def error(reason):
    """Login screen annotated with an error message; plain feed if logged in."""
    if not cur_doc:
        return render_template("index.html", loggedOut="loggedOut", error=reason)
    return render_template("index.html", home="home", cur_doc=cur_doc,
                           posts=getPosts())
@app.route('/profile')
def profile():
    """Profile page showing only the logged-in user's own posts."""
    if not cur_doc:
        return render_template("index.html", loggedOut="loggedOut")
    own_posts = getPostsByUser(cur_doc['username'])
    return render_template("index.html", profile="profile", cur_doc=cur_doc,
                           posts=own_posts)
@app.route('/search/users/<user>')
def search(user):
    """Search results page listing every post by ``user``."""
    if not cur_doc:
        return render_template("index.html", loggedOut="loggedOut")
    matches = getPostsByUser(user)
    return render_template("index.html", search=user, cur_doc=cur_doc,
                           posts=matches)
@app.route('/search/tags/<tag>')
def searchTag(tag):
    """Search results page listing every post carrying ``tag``."""
    if not cur_doc:
        return render_template("index.html", loggedOut="loggedOut")
    matches = getPostsByTag(tag)
    return render_template("index.html", search=tag, cur_doc=cur_doc,
                           posts=matches)
@app.route('/settings')
def settings():
    """Account-settings page for the logged-in user."""
    if not cur_doc:
        return render_template("index.html", loggedOut="loggedOut")
    return render_template("index.html", settings="settings", cur_doc=cur_doc)
@app.route('/logout', methods=["GET", "POST"])
def logout():
    # Log out by clearing the module-level session state; the client
    # handles the redirect, so an empty body is returned.
    global cur_doc
    cur_doc = ""
    return ''
# POST methods
# Checks credentials, sets the cur_doc to the current user's details (if credentials are correct) and returns next route
@app.route('/login', methods=["GET", "POST"])
def login():
    """Check credentials and return the next client-side route.

    On success ``cur_doc`` is bound to the matching user document and
    '/' is returned; otherwise an /Error/<reason> route is returned.
    NOTE(review): passwords appear to be stored and compared in
    plaintext — confirm whether hashing is handled elsewhere.
    """
    global cur_doc
    username = fl.request.values["username"]
    password = fl.request.values["password"]
    for doc_id in db:
        candidate = db[doc_id]
        if candidate['username'] == username:
            if candidate['password'] == password:
                cur_doc = candidate
                return '/'
            return '/Error/Invalid Password'
    return '/Error/User Not Found'
# Checks sign in info, adding to database posts_tf if correct or returning next route (/Error/<reason> route)
@app.route('/register', methods=["GET", "POST"])
def register():
    """Validate registration input and create the user on success.

    Returns an /Error/<reason> route string when validation fails,
    otherwise saves the new user and returns a greeting string.
    """
    name = fl.request.values["user"]
    password = fl.request.values["pass"]
    confirm = fl.request.values["pass1"]
    # Reject duplicate usernames before any other validation.
    for doc_id in db:
        if db[doc_id]['username'] == name:
            return '/Error/Username Already Exists'
    # Guard clauses mirror the original validation order exactly.
    if not name:
        return '/Error/Enter Username'
    if name.isspace():
        return '/Error/Username Must Contain Characters'
    if password != confirm:
        return '/Error/Passwords Do Not Match'
    if ' ' in password:
        return '/Error/Password Contains Spaces'
    if len(password) < 8:
        return '/Error/Password Must Contain 8 Characters'
    db.save({'username': name, 'password': password})
    return 'Congrats ' + name + '!'
# Gets current time, builds tag string and converts to json. Then saves post to posts_tf database.
@app.route('/addPost', methods=["GET", "POST"])
def addPost():
    """Create a new post for the logged-in user.

    Reads ``post_content`` and ``post_tags`` from the request, rejects
    blank content, extracts unique ``#tag`` words from the tag string
    and saves the post to the ``posts_tf`` database.
    Returns '' on success, 'Error' when content is blank.
    """
    post_content = fl.request.values["post_content"]
    post_tags = fl.request.values["post_tags"]
    post_time = time.strftime("%c")
    if not post_content or post_content.isspace():
        return 'Error'
    # Build tag documents directly instead of hand-assembling a JSON
    # string: the old json.loads(temp) round-trip crashed on any tag
    # containing a double quote or backslash, and was needlessly indirect.
    tag_names = {tag.strip("#") for tag in post_tags.split()
                 if tag.startswith("#")}
    tags = [{'tag': name} for name in tag_names]
    doc = {'username': cur_doc['username'],
           'post_content': post_content,
           'post_time': post_time,
           'tags': tags,
           'tags_string': post_tags}
    db1.save(doc)
    return ''
# Finds user posts and deletes each one. Then deletes current user.
@app.route('/delete', methods=["GET", "POST"])
def delete():
    """Delete every post by the current user, then the user itself."""
    global cur_doc
    for doc_id in db1:
        post = db1[doc_id]
        if post['username'] == cur_doc['username']:
            db1.delete(post)
    db.delete(cur_doc)
    cur_doc = ""  # effectively logs the (now removed) user out
    return ''
# Updates username of all current user's posts and updates current user details
@app.route('/updateName', methods=["GET", "POST"])
def updateName():
    """Rename the current user across both databases.

    Rejects names already in use; otherwise rewrites the username on
    every post and on the user record, and rebinds ``cur_doc``.
    """
    global cur_doc
    name = fl.request.values["name"]
    # Refuse names that collide with an existing account.
    for doc_id in db:
        if db[doc_id]['username'] == name:
            return 'Username Already Exists'
    # Database Containing Posts
    for doc_id in db1:
        post = db1[doc_id]
        if post['username'] == cur_doc['username']:
            post['username'] = name
            db1[post.id] = post
    # Database Containing User Info
    for doc_id in db:
        user = db[doc_id]
        if user['username'] == cur_doc['username']:
            user['username'] = name
            db[user.id] = user
            cur_doc = user
    return 'successful'
# Validates new password and updates current user details
@app.route('/updatePass', methods=["GET", "POST"])
def updatePass():
    """Validate a new password and store it on the current user."""
    global cur_doc
    password = fl.request.values["password"]
    confirm = fl.request.values["password1"]
    if password != confirm:
        return 'Passwords Do Not Match'
    if ' ' in password:
        return 'Password Contains Spaces'
    if len(password) < 8:
        return 'Password Must Contain 8 Characters'
    for doc_id in db:
        user = db[doc_id]
        if user['username'] == cur_doc['username']:
            user['password'] = password
            db[user.id] = user
            cur_doc = user
    return 'successful'
@app.route('/securityCheck', methods=["GET", "POST"])
def check():
    """Confirm the submitted password matches the current user's."""
    attempt = fl.request.values["password"]
    return 'Correct' if attempt == cur_doc['password'] else 'Incorrect Password'
#Functions to get a list of certain posts
def getPosts():
    """Return every post as plain JSON-compatible dicts, newest first."""
    # Adapted http://stackoverflow.com/questions/1640054/multiple-couchdb-document-fetch-with-couchdb-python
    rows = db1.view('_all_docs', include_docs=True)
    docs = [row.doc for row in rows]
    # Round-trip through JSON to turn couchdb Document objects into dicts.
    posts = json.loads(json.dumps(docs, indent=4))
    posts.reverse()
    return posts
def getPostsByUser(username):
    """Return all posts by ``username`` as plain dicts, newest first.

    Builds the result dicts directly instead of hand-assembling a JSON
    string: the old approach crashed in json.loads whenever a post
    contained a double quote or backslash. Newlines are still flattened
    to spaces to match the previous output.
    """
    posts = []
    for doc_id in db1:
        doc = db1[doc_id]
        if doc['username'] == username:
            posts.append({
                'username': doc['username'],
                'post_content': doc['post_content'].replace('\n', ' '),
                'post_time': doc['post_time'],
                'tags_string': doc['tags_string'].replace('\n', ' '),
            })
    posts.reverse()
    return posts
def getPostsByTag(TAG):
    """Return all posts carrying tag ``TAG`` as plain dicts, newest first.

    Same fix as getPostsByUser: direct dict construction replaces the
    hand-built JSON string that broke on quotes/backslashes in content.
    """
    posts = []
    for doc_id in db1:
        doc = db1[doc_id]
        for tag in doc['tags']:
            if tag and tag['tag'] == TAG:
                posts.append({
                    'username': doc['username'],
                    'post_content': doc['post_content'].replace('\n', ' '),
                    'post_time': doc['post_time'],
                    'tags_string': doc['tags_string'].replace('\n', ' '),
                })
    posts.reverse()
    return posts
if __name__ == "__main__":
    # Run the Flask development server (default host/port, debug off).
    app.run()
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import eventlet
import netaddr
from oslo.serialization import jsonutils
import six
from nova import exception
from nova.i18n import _
def ensure_string_keys(d):
    """Return a copy of *d* with every key coerced to ``str``.

    Works around http://bugs.python.org/issue4978: Python 2 rejects
    unicode keyword-argument names when a dict is splatted with ``**``.
    """
    # d.items() works on Python 2 and 3 alike; the previous
    # d.iteritems() call was Python-2-only.
    return {str(k): v for k, v in d.items()}
# Constants for the 'vif_type' field in VIF class
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_DVS = 'dvs'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HW_VEB = 'hw_veb'
VIF_TYPE_MLNX_DIRECT = 'mlnx_direct'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_OTHER = 'other'

# Constants for dictionary keys in the 'vif_details' field in the VIF
# class
VIF_DETAILS_PORT_FILTER = 'port_filter'
VIF_DETAILS_OVS_HYBRID_PLUG = 'ovs_hybrid_plug'
VIF_DETAILS_PHYSICAL_NETWORK = 'physical_network'

# The following two constants define the SR-IOV related fields in the
# 'vif_details'. 'profileid' should be used for VIF_TYPE_802_QBH,
# 'vlan' for VIF_TYPE_HW_VEB
VIF_DETAILS_PROFILEID = 'profileid'
VIF_DETAILS_VLAN = 'vlan'

# Define supported virtual NIC types. VNIC_TYPE_DIRECT and VNIC_TYPE_MACVTAP
# are used for SR-IOV ports
VNIC_TYPE_NORMAL = 'normal'
VNIC_TYPE_DIRECT = 'direct'
VNIC_TYPE_MACVTAP = 'macvtap'

# Constants for the 'vif_model' values (guest-visible NIC emulation model)
VIF_MODEL_VIRTIO = 'virtio'
VIF_MODEL_NE2K_PCI = 'ne2k_pci'
VIF_MODEL_PCNET = 'pcnet'
VIF_MODEL_RTL8139 = 'rtl8139'
VIF_MODEL_E1000 = 'e1000'
VIF_MODEL_E1000E = 'e1000e'
VIF_MODEL_NETFRONT = 'netfront'
VIF_MODEL_SPAPR_VLAN = 'spapr-vlan'

# Constant for max length of network interface names
# eg 'bridge' in the Network class or 'devname' in
# the VIF class
NIC_NAME_LEN = 14
class Model(dict):
    """Defines some necessary structures for most of the network models."""

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__,
                                 dict.__repr__(self))

    def _set_meta(self, kwargs):
        # The 'meta' entry (if present) seeds the metadata dict; all
        # remaining kwargs are folded into it as extra metadata.
        meta = kwargs.pop('meta', {})
        meta.update(kwargs)
        self['meta'] = meta

    def get_meta(self, key, default=None):
        """calls get(key, default) on self['meta']."""
        return self['meta'].get(key, default)
class IP(Model):
    """Represents an IP address in Nova."""

    def __init__(self, address=None, type=None, **kwargs):
        super(IP, self).__init__()
        self['address'] = address
        self['type'] = type
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # Derive the IP version from the address when not supplied.
        if self['address'] and not self['version']:
            try:
                self['version'] = netaddr.IPAddress(self['address']).version
            except netaddr.AddrFormatError:
                raise exception.InvalidIpAddressError(
                    _("Invalid IP format %s") % self['address'])

    def __eq__(self, other):
        return all(self[k] == other[k]
                   for k in ('address', 'type', 'version'))

    def __ne__(self, other):
        return not self.__eq__(other)

    def is_in_subnet(self, subnet):
        if not (self['address'] and subnet['cidr']):
            return False
        return (netaddr.IPAddress(self['address']) in
                netaddr.IPNetwork(subnet['cidr']))

    @classmethod
    def hydrate(cls, ip):
        # A falsy input hydrates to None.
        return cls(**ensure_string_keys(ip)) if ip else None
class FixedIP(IP):
    """Represents a Fixed IP address in Nova."""

    def __init__(self, floating_ips=None, **kwargs):
        super(FixedIP, self).__init__(**kwargs)
        self['floating_ips'] = floating_ips or []
        if not self['type']:
            self['type'] = 'fixed'

    def add_floating_ip(self, floating_ip):
        # de-duplicated append
        if floating_ip not in self['floating_ips']:
            self['floating_ips'].append(floating_ip)

    def floating_ip_addresses(self):
        return [fip['address'] for fip in self['floating_ips']]

    @staticmethod
    def hydrate(fixed_ip):
        hydrated = FixedIP(**ensure_string_keys(fixed_ip))
        hydrated['floating_ips'] = [IP.hydrate(fip)
                                    for fip in hydrated['floating_ips']]
        return hydrated

    def __eq__(self, other):
        return all(self[k] == other[k]
                   for k in ('address', 'type', 'version', 'floating_ips'))

    def __ne__(self, other):
        return not self.__eq__(other)
class Route(Model):
    """Represents an IP Route in Nova."""

    def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
        super(Route, self).__init__()
        self['cidr'] = cidr
        self['gateway'] = gateway
        self['interface'] = interface
        self._set_meta(kwargs)

    @classmethod
    def hydrate(cls, route):
        hydrated = cls(**ensure_string_keys(route))
        hydrated['gateway'] = IP.hydrate(hydrated['gateway'])
        return hydrated
class Subnet(Model):
    """Represents a Subnet in Nova."""

    def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
                 routes=None, **kwargs):
        super(Subnet, self).__init__()
        self['cidr'] = cidr
        self['dns'] = dns or []
        self['gateway'] = gateway
        self['ips'] = ips or []
        self['routes'] = routes or []
        self['version'] = kwargs.pop('version', None)
        self._set_meta(kwargs)
        # Derive the IP version from the CIDR when not supplied.
        if self['cidr'] and not self['version']:
            self['version'] = netaddr.IPNetwork(self['cidr']).version

    def __eq__(self, other):
        for key in ('cidr', 'dns', 'gateway', 'ips', 'routes', 'version'):
            if self[key] != other[key]:
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def add_route(self, new_route):
        # de-duplicated append
        if new_route not in self['routes']:
            self['routes'].append(new_route)

    def add_dns(self, dns):
        if dns not in self['dns']:
            self['dns'].append(dns)

    def add_ip(self, ip):
        if ip not in self['ips']:
            self['ips'].append(ip)

    def as_netaddr(self):
        """Convenience function to get cidr as a netaddr object."""
        return netaddr.IPNetwork(self['cidr'])

    @classmethod
    def hydrate(cls, subnet):
        obj = cls(**ensure_string_keys(subnet))
        obj['dns'] = [IP.hydrate(entry) for entry in obj['dns']]
        obj['ips'] = [FixedIP.hydrate(entry) for entry in obj['ips']]
        obj['routes'] = [Route.hydrate(entry) for entry in obj['routes']]
        obj['gateway'] = IP.hydrate(obj['gateway'])
        return obj
class Network(Model):
    """Represents a Network in Nova."""

    def __init__(self, id=None, bridge=None, label=None,
                 subnets=None, **kwargs):
        super(Network, self).__init__()
        self['id'] = id
        self['bridge'] = bridge
        self['label'] = label
        self['subnets'] = subnets or []
        self._set_meta(kwargs)

    def add_subnet(self, subnet):
        # de-duplicated append
        if subnet not in self['subnets']:
            self['subnets'].append(subnet)

    @classmethod
    def hydrate(cls, network):
        # A falsy input is passed back unchanged.
        if network:
            hydrated = cls(**ensure_string_keys(network))
            hydrated['subnets'] = [Subnet.hydrate(subnet)
                                   for subnet in hydrated['subnets']]
            network = hydrated
        return network

    def __eq__(self, other):
        return all(self[k] == other[k]
                   for k in ('id', 'bridge', 'label', 'subnets'))

    def __ne__(self, other):
        return not self.__eq__(other)
class VIF8021QbgParams(Model):
    """Represents the parameters for a 802.1qbg VIF."""

    def __init__(self, managerid, typeid, typeidversion, instanceid):
        # NOTE: Model.__init__/_set_meta are intentionally not called,
        # matching the original (no 'meta' key on this model).
        for key, value in (('managerid', managerid),
                           ('typeid', typeid),
                           ('typeidversion', typeidversion),
                           ('instanceid', instanceid)):
            self[key] = value
class VIF8021QbhParams(Model):
    """Represents the parameters for a 802.1qbh VIF."""

    def __init__(self, profileid):
        # Single-field model; no 'meta' key, matching the original.
        self.update(profileid=profileid)
class VIF(Model):
    """Represents a Virtual Interface in Nova."""

    def __init__(self, id=None, address=None, network=None, type=None,
                 details=None, devname=None, ovs_interfaceid=None,
                 qbh_params=None, qbg_params=None, active=False,
                 vnic_type=VNIC_TYPE_NORMAL, profile=None,
                 **kwargs):
        super(VIF, self).__init__()
        self['id'] = id
        self['address'] = address
        self['network'] = network or None
        self['type'] = type  # one of the VIF_TYPE_* constants
        self['details'] = details or {}  # keyed by VIF_DETAILS_* constants
        self['devname'] = devname
        self['ovs_interfaceid'] = ovs_interfaceid
        self['qbh_params'] = qbh_params
        self['qbg_params'] = qbg_params
        self['active'] = active
        self['vnic_type'] = vnic_type  # one of the VNIC_TYPE_* constants
        self['profile'] = profile
        self._set_meta(kwargs)

    def __eq__(self, other):
        keys = ['id', 'address', 'network', 'vnic_type',
                'type', 'profile', 'details', 'devname',
                'ovs_interfaceid', 'qbh_params', 'qbg_params',
                'active']
        return all(self[k] == other[k] for k in keys)

    def __ne__(self, other):
        return not self.__eq__(other)

    def fixed_ips(self):
        # Flatten the fixed IPs of all subnets in this VIF's network.
        return [fixed_ip for subnet in self['network']['subnets']
                for fixed_ip in subnet['ips']]

    def floating_ips(self):
        # Flatten the floating IPs attached to each fixed IP.
        return [floating_ip for fixed_ip in self.fixed_ips()
                for floating_ip in fixed_ip['floating_ips']]

    def labeled_ips(self):
        """Returns the list of all IPs

        The return value looks like this flat structure::

            {'network_label': 'my_network',
             'network_id': 'n8v29837fn234782f08fjxk3ofhb84',
             'ips': [{'address': '123.123.123.123',
                      'version': 4,
                      'type': 'fixed',
                      'meta': {...}},
                     {'address': '124.124.124.124',
                      'version': 4,
                      'type': 'floating',
                      'meta': {...}},
                     {'address': 'fe80::4',
                      'version': 6,
                      'type': 'fixed',
                      'meta': {...}}]

        NOTE(review): returns a dict when a network is present but an
        empty *list* otherwise — callers must tolerate both shapes.
        """
        if self['network']:
            # remove unnecessary fields on fixed_ips
            ips = [IP(**ensure_string_keys(ip)) for ip in self.fixed_ips()]
            for ip in ips:
                # remove floating ips from IP, since this is a flat structure
                # of all IPs
                del ip['meta']['floating_ips']
            # add floating ips to list (if any)
            ips.extend(self.floating_ips())
            return {'network_label': self['network']['label'],
                    'network_id': self['network']['id'],
                    'ips': ips}
        return []

    def is_hybrid_plug_enabled(self):
        return self['details'].get(VIF_DETAILS_OVS_HYBRID_PLUG, False)

    def is_neutron_filtering_enabled(self):
        return self['details'].get(VIF_DETAILS_PORT_FILTER, False)

    def get_physical_network(self):
        # Prefer the network's meta entry, fall back to vif_details.
        phy_network = self['network']['meta'].get('physical_network')
        if not phy_network:
            phy_network = self['details'].get(VIF_DETAILS_PHYSICAL_NETWORK)
        return phy_network

    @classmethod
    def hydrate(cls, vif):
        vif = cls(**ensure_string_keys(vif))
        vif['network'] = Network.hydrate(vif['network'])
        return vif
def get_netmask(ip, subnet):
    """Returns the netmask appropriate for injection into a guest."""
    net = subnet.as_netaddr()
    # NOTE(review): v4 yields a dotted-quad *string*, v6 yields the
    # prefix-length *int* — callers apparently rely on this asymmetry.
    return str(net.netmask) if ip['version'] == 4 else net._prefixlen
class NetworkInfo(list):
    """Stores and manipulates network information for a Nova instance."""

    # NetworkInfo is a list of VIFs

    def fixed_ips(self):
        """Returns all fixed_ips without floating_ips attached."""
        ips = []
        for vif in self:
            ips.extend(vif.fixed_ips())
        return ips

    def floating_ips(self):
        """Returns all floating_ips."""
        ips = []
        for vif in self:
            ips.extend(vif.floating_ips())
        return ips

    @classmethod
    def hydrate(cls, network_info):
        # Accept either a JSON string or an already-parsed list.
        if isinstance(network_info, six.string_types):
            network_info = jsonutils.loads(network_info)
        return cls(VIF.hydrate(vif) for vif in network_info)

    def json(self):
        return jsonutils.dumps(self)

    def wait(self, do_raise=True):
        """A no-op method.

        This is useful to avoid type checking when NetworkInfo might be
        subclassed with NetworkInfoAsyncWrapper.
        """
        pass
class NetworkInfoAsyncWrapper(NetworkInfo):
    """Wrapper around NetworkInfo that allows retrieving NetworkInfo
    in an async manner.

    This allows one to start querying for network information before
    you know you will need it.  If you have a long-running
    operation, this allows the network model retrieval to occur in the
    background.  When you need the data, it will ensure the async
    operation has completed.

    As an example:

    def allocate_net_info(arg1, arg2)
        return call_neutron_to_allocate(arg1, arg2)

    network_info = NetworkInfoAsyncWrapper(allocate_net_info, arg1, arg2)
    [do a long running operation -- real network_info will be retrieved
    in the background]
    [do something with network_info]
    """

    def __init__(self, async_method, *args, **kwargs):
        # Start the retrieval in an eventlet green thread immediately.
        self._gt = eventlet.spawn(async_method, *args, **kwargs)
        # Rebind the listed public methods on this *instance* so every
        # call first waits for the green thread to finish.
        methods = ['json', 'fixed_ips', 'floating_ips']
        for method in methods:
            fn = getattr(self, method)
            wrapper = functools.partial(self._sync_wrapper, fn)
            functools.update_wrapper(wrapper, fn)
            setattr(self, method, wrapper)

    def _sync_wrapper(self, wrapped, *args, **kwargs):
        """Synchronize the model before running a method."""
        self.wait()
        return wrapped(*args, **kwargs)

    def __getitem__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__getitem__
        return self._sync_wrapper(fn, *args, **kwargs)

    def __iter__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__iter__
        return self._sync_wrapper(fn, *args, **kwargs)

    def __len__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__len__
        return self._sync_wrapper(fn, *args, **kwargs)

    def __str__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__str__
        return self._sync_wrapper(fn, *args, **kwargs)

    def __repr__(self, *args, **kwargs):
        fn = super(NetworkInfoAsyncWrapper, self).__repr__
        return self._sync_wrapper(fn, *args, **kwargs)

    def wait(self, do_raise=True):
        """Wait for async call to finish."""
        if self._gt is not None:
            try:
                # NOTE(comstud): This looks funky, but this object is
                # subclassed from list.  In other words, 'self' is really
                # just a list with a bunch of extra methods.  So this
                # line just replaces the current list (which should be
                # empty) with the result.
                self[:] = self._gt.wait()
            except Exception:
                if do_raise:
                    raise
            finally:
                # Only wait once; subsequent calls are no-ops.
                self._gt = None
| |
# -*- coding: utf-8 -*-
'''
Query and modify an LDAP database (alternative interface)
=========================================================
.. versionadded:: 2016.3.0
This is an alternative to the ``ldap`` interface provided by the
:py:mod:`ldapmod <salt.modules.ldapmod>` execution module.
:depends: - ``ldap`` Python module
'''
from __future__ import absolute_import
available_backends = set()
try:
import ldap
import ldap.ldapobject
import ldap.modlist
import ldap.sasl
available_backends.add('ldap')
except ImportError:
pass
import logging
import salt.ext.six as six
import sys
log = logging.getLogger(__name__)
def __virtual__():
    '''Only load this module if the Python ldap module is present'''
    # An empty set is already falsy; len() was redundant.
    return bool(available_backends)
class LDAPError(Exception):
    '''Base class of all LDAP exceptions raised by backends.

    This is only used for errors encountered while interacting with
    the LDAP server; usage errors (e.g., invalid backend name) will
    have a different type.

    :ivar cause: backend exception object, if applicable
    '''

    def __init__(self, message, cause=None):
        super(LDAPError, self).__init__(message)
        # Keep a handle on the originating backend exception, if any.
        self.cause = cause
def _convert_exception(e):
    '''Convert an ldap backend exception to an LDAPError and raise it.'''
    message = 'exception in ldap backend: {0}'.format(repr(e))
    if six.PY2:
        # Preserve the original traceback on Python 2.
        six.reraise(LDAPError, (message, e), sys.exc_info()[2])
    else:
        # Chain the backend exception as the cause on Python 3.
        six.raise_from(LDAPError(message, e), e)
def _bind(l, bind=None):
    '''Apply the bind settings in ``bind`` to connection ``l``.

    ``None`` (or a ``None`` method) means an anonymous connection —
    nothing is done.
    '''
    if bind is None:
        return
    method = bind.get('method', 'simple')
    if method is None:
        return
    if method == 'simple':
        l.simple_bind_s(bind.get('dn', ''), bind.get('password', ''))
    elif method == 'sasl':
        mechanism = bind.get('mechanism', 'EXTERNAL').lower()
        sasl_class = getattr(ldap.sasl, mechanism)
        creds = bind.get('credentials', None)
        if creds is None:
            creds = {}
        auth = sasl_class(*creds.get('args', []), **creds.get('kwargs', {}))
        l.sasl_interactive_bind_s(bind.get('dn', ''), auth)
    else:
        raise ValueError('unsupported bind method "' + method
                         + '"; supported bind methods: simple sasl')
class _connect_ctx(object):
def __init__(self, c):
self.c = c
def __enter__(self):
return self
def __exit__(self, *exc):
pass
def connect(connect_spec=None):
    '''Connect and optionally bind to an LDAP server.

    :param connect_spec:
        This can be an LDAP connection object returned by a previous
        call to :py:func:`connect` (in which case the argument is
        simply returned), ``None`` (in which case an empty dict is
        used), or a dict with the following keys:

        * ``'backend'``
            Optional; default depends on which Python LDAP modules are
            installed.  Name of the Python LDAP module to use.  Only
            ``'ldap'`` is supported at the moment.

        * ``'url'``
            Optional; defaults to ``'ldapi:///'``.  URL to the LDAP
            server.

        * ``'bind'``
            Optional; defaults to ``None``.  Describes how to bind an
            identity to the LDAP connection.  If ``None``, an
            anonymous connection is made.  Valid keys:

            * ``'method'``
                Optional; defaults to ``None``.  The authentication
                method to use.  Valid values include but are not
                necessarily limited to ``'simple'``, ``'sasl'``, and
                ``None``.  If ``None``, an anonymous connection is
                made.  Available methods depend on the chosen backend.

            * ``'mechanism'``
                Optional; defaults to ``'EXTERNAL'``.  The SASL
                mechanism to use.  Ignored unless the method is
                ``'sasl'``.  Available methods depend on the chosen
                backend and the server's capabilities.

            * ``'credentials'``
                Optional; defaults to ``None``.  An object specific to
                the chosen SASL mechanism and backend that represents
                the authentication credentials.  Ignored unless the
                method is ``'sasl'``.

                For the ``'ldap'`` backend, this is a dictionary.  If
                ``None``, an empty dict is used.  Keys:

                * ``'args'``
                    Optional; defaults to an empty list.  A list of
                    arguments to pass to the SASL mechanism
                    constructor.  See the SASL mechanism constructor
                    documentation in the ``ldap.sasl`` Python module.

                * ``'kwargs'``
                    Optional; defaults to an empty dict.  A dict of
                    keyword arguments to pass to the SASL mechanism
                    constructor.  See the SASL mechanism constructor
                    documentation in the ``ldap.sasl`` Python module.

            * ``'dn'``
                Optional; defaults to an empty string.  The
                distinguished name to bind.

            * ``'password'``
                Optional; defaults to an empty string.  Password for
                binding.  Ignored if the method is ``'sasl'``.

        * ``'tls'``
            Optional; defaults to ``None``.  A backend-specific object
            containing settings to override default TLS behavior.

            For the ``'ldap'`` backend, this is a dictionary.  Not all
            settings in this dictionary are supported by all versions
            of ``python-ldap`` or the underlying TLS library.  If
            ``None``, an empty dict is used.  Possible keys:

            * ``'starttls'``
                If present, initiate a TLS connection using StartTLS.
                (The value associated with this key is ignored.)

            * ``'cacertdir'``
                Set the path of the directory containing CA
                certificates.

            * ``'cacertfile'``
                Set the pathname of the CA certificate file.

            * ``'certfile'``
                Set the pathname of the certificate file.

            * ``'cipher_suite'``
                Set the allowed cipher suite.

            * ``'crlcheck'``
                Set the CRL evaluation strategy.  Valid values are
                ``'none'``, ``'peer'``, and ``'all'``.

            * ``'crlfile'``
                Set the pathname of the CRL file.

            * ``'dhfile'``
                Set the pathname of the file containing the parameters
                for Diffie-Hellman ephemeral key exchange.

            * ``'keyfile'``
                Set the pathname of the certificate key file.

            * ``'newctx'``
                If present, instruct the underlying TLS library to
                create a new TLS context.  (The value associated with
                this key is ignored.)

            * ``'protocol_min'``
                Set the minimum protocol version.

            * ``'random_file'``
                Set the pathname of the random file when
                ``/dev/random`` and ``/dev/urandom`` are not
                available.

            * ``'require_cert'``
                Set the certificate validation policy.  Valid values
                are ``'never'``, ``'hard'``, ``'demand'``,
                ``'allow'``, and ``'try'``.

        * ``'opts'``
            Optional; defaults to ``None``.  A backend-specific object
            containing options for the backend.

            For the ``'ldap'`` backend, this is a dictionary of
            OpenLDAP options to set.  If ``None``, an empty dict is
            used.  Each key is a the name of an OpenLDAP option
            constant without the ``'LDAP_OPT_'`` prefix, then
            converted to lower case.

    :returns:
        an object representing an LDAP connection that can be used as
        the ``connect_spec`` argument to any of the functions in this
        module (to avoid the overhead of making and terminating
        multiple connections).

        This object should be used as a context manager.  It is safe
        to nest ``with`` statements.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.connect "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'dn': 'cn=admin,dc=example,dc=com',
                'password': 'secret'}
        }"
    '''
    # An already-built context is reused as-is.
    if isinstance(connect_spec, _connect_ctx):
        return connect_spec
    if connect_spec is None:
        connect_spec = {}
    backend_name = connect_spec.get('backend', 'ldap')
    if backend_name not in available_backends:
        raise ValueError('unsupported backend or required Python module'
                         + ' unavailable: {0}'.format(backend_name))
    url = connect_spec.get('url', 'ldapi:///')
    try:
        l = ldap.initialize(url)
        l.protocol_version = ldap.VERSION3
        # set up tls
        tls = connect_spec.get('tls', None)
        if tls is None:
            tls = {}
        vars = {}
        for k, v in six.iteritems(tls):
            if k in ('starttls', 'newctx'):
                # flag-style keys; applied after the option loop below
                vars[k] = True
            elif k in ('crlcheck', 'require_cert'):
                # here both the key and the value name OPT_X_TLS_* constants
                l.set_option(getattr(ldap, 'OPT_X_TLS_' + k.upper()),
                             getattr(ldap, 'OPT_X_TLS_' + v.upper()))
            else:
                l.set_option(getattr(ldap, 'OPT_X_TLS_' + k.upper()), v)
        if vars.get('starttls', False):
            l.start_tls_s()
        if vars.get('newctx', False):
            l.set_option(ldap.OPT_X_TLS_NEWCTX, 0)
        # set up other options
        l.set_option(ldap.OPT_REFERRALS, 0)
        opts = connect_spec.get('opts', None)
        if opts is None:
            opts = {}
        for k, v in six.iteritems(opts):
            opt = getattr(ldap, 'OPT_' + k.upper())
            l.set_option(opt, v)
        _bind(l, connect_spec.get('bind', None))
    except ldap.LDAPError as e:
        _convert_exception(e)  # always raises LDAPError
    return _connect_ctx(l)
def search(connect_spec, base, scope='subtree', filterstr='(objectClass=*)',
           attrlist=None, attrsonly=0):
    '''Search an LDAP database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param base:
        Distinguished name of the entry at which to start the search.

    :param scope:
        One of the following:

        * ``'subtree'``
            Search the base and all of its descendants.

        * ``'base'``
            Search only the base itself.

        * ``'onelevel'``
            Search only the base's immediate children.

    :param filterstr:
        String representation of the filter to apply in the search.

    :param attrlist:
        Limit the returned attributes to those in the specified list.
        If ``None``, all attributes of each entry are returned.

    :param attrsonly:
        If non-zero, don't return any attribute values.

    :returns:
        a dict of results.  The dict is empty if there are no results.
        The dict maps each returned entry's distinguished name to a
        dict that maps each of the matching attribute names to a list
        of its values.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.search "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'dn': 'cn=admin,dc=example,dc=com',
                'password': 'secret',
            },
        }" "base='dc=example,dc=com'"
    '''
    l = connect(connect_spec)
    # map 'subtree'/'base'/'onelevel' to the ldap.SCOPE_* constant
    scope = getattr(ldap, 'SCOPE_' + scope.upper())
    try:
        results = l.c.search_s(base, scope, filterstr, attrlist, attrsonly)
    except ldap.NO_SUCH_OBJECT:
        # a missing base yields an empty result set, not an error
        results = []
    except ldap.LDAPError as e:
        _convert_exception(e)  # always raises LDAPError
    return dict(results)
def add(connect_spec, dn, attributes):
    '''Add an entry to an LDAP database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry.

    :param attributes:
        Non-empty dict mapping each of the new entry's attributes to a
        non-empty iterable of values.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.add "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret',
            },
        }" "dn='dc=example,dc=com'" "attributes={'example': 'values'}"
    '''
    l = connect(connect_spec)
    # convert the "iterable of values" to lists in case that's what
    # addModlist() expects (also to ensure that the caller's objects
    # are not modified)
    attributes = dict(((attr, list(vals))
                       for attr, vals in six.iteritems(attributes)))
    log.info('adding entry: dn: {0} attributes: {1}'.format(
        repr(dn), repr(attributes)))
    modlist = ldap.modlist.addModlist(attributes)
    try:
        l.c.add_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)  # always raises LDAPError
    return True
def delete(connect_spec, dn):
    '''Delete an entry from an LDAP database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.delete "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret'}
        }" dn='cn=admin,dc=example,dc=com'
    '''
    l = connect(connect_spec)
    log.info('deleting entry: dn: {0}'.format(repr(dn)))
    try:
        l.c.delete_s(dn)
    except ldap.LDAPError as e:
        _convert_exception(e)  # always raises LDAPError
    return True
def modify(connect_spec, dn, directives):
    '''Modify an entry in an LDAP database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry to modify.

    :param directives:
        Iterable of ``(op, attr, vals)`` tuples describing the changes:

        * ``op`` is one of ``'add'``, ``'delete'``, or ``'replace'``.
          ``'add'`` adds the given values to the attribute;
          ``'delete'`` removes the named values from the attribute (or
          every value, if none are named); ``'replace'`` swaps all of
          the attribute's values for the given ones.
        * ``attr`` names the attribute to modify.
        * ``vals`` is an iterable of values to add or delete.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.modify "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret'}
        }" dn='cn=admin,dc=example,dc=com'
        directives="('add', 'example', ['example_val'])"
    '''
    l = connect(connect_spec)
    # Build the modlist expected by modify_s().  Each iterable of values
    # is copied into a fresh list so arbitrary iterables are accepted and
    # the caller's objects are never mutated.
    modlist = []
    for op, attr, vals in directives:
        modlist.append((getattr(ldap, 'MOD_' + op.upper()), attr, list(vals)))
    try:
        l.c.modify_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)
    return True
def change(connect_spec, dn, before, after):
    '''Modify an entry in an LDAP database.

    This does the same thing as :py:func:`modify`, but with a simpler
    interface: instead of a list of directives it takes a before and an
    after view of the entry, computes the differences, and applies them.

    A value present in ``before`` but missing from ``after`` is deleted;
    a value present in ``after`` but missing from ``before`` is added.
    Values mentioned in neither dict are left alone, and values present
    in both are ignored regardless of the database contents.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.

    :param dn:
        Distinguished name of the entry.

    :param before:
        The expected state of the entry before modification: a dict
        mapping each attribute name to an iterable of values.

    :param after:
        The desired state of the entry after modification, in the same
        format as ``before``.

    :returns:
        ``True`` if successful, raises an exception otherwise.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.change "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'password': 'secret'}
        }" dn='cn=admin,dc=example,dc=com'
        before="{'example_value': 'before_val'}"
        after="{'example_value': 'after_val'}"
    '''
    l = connect(connect_spec)

    # modifyModlist() may expect real lists, and the caller's dicts must
    # not be mutated, so copy every iterable of values into a new list.
    def _copy_entry(entry):
        return dict((attr, list(vals))
                    for attr, vals in six.iteritems(entry))

    old_state = _copy_entry(before)
    new_state = _copy_entry(after)
    modlist = ldap.modlist.modifyModlist(old_state, new_state)
    try:
        l.c.modify_s(dn, modlist)
    except ldap.LDAPError as e:
        _convert_exception(e)
    return True
| |
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
# (c) 2015, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pear
short_description: Manage pear/pecl packages
description:
- Manage PHP packages with the pear package manager.
version_added: 2.0
author:
- "'jonathan.lestrelin' <jonathan.lestrelin@gmail.com>"
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: true
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
executable:
description:
- Path to the pear executable
required: false
default: null
version_added: "2.4"
'''
EXAMPLES = '''
# Install pear package
- pear:
name: Net_URL2
state: present
# Install pecl package
- pear:
name: pecl/json_post
state: present
# Upgrade package
- pear:
name: Net_URL2
state: latest
# Remove packages
- pear:
name: Net_URL2,pecl/json_post
state: absent
'''
import os
from ansible.module_utils.basic import AnsibleModule
def get_local_version(pear_output):
    """Parse ``pear remote-info`` output and return the locally installed
    version string, or None when the package is not installed."""
    for raw_line in pear_output.split('\n'):
        if 'Installed ' not in raw_line:
            continue
        version = raw_line.rsplit(None, 1)[-1].strip()
        # pear prints '-' in the Installed column when nothing is installed
        if version != '-':
            return version
    return None
def _get_pear_path(module):
if module.params['executable'] and os.path.isfile(module.params['executable']):
return module.params['executable']
else:
return module.get_bin_path('pear', True, [module.params['executable']])
def get_repository_version(pear_output):
    """Parse ``pear remote-info`` output and return the latest version
    available in the repository, or None when it cannot be determined."""
    for raw_line in pear_output.split('\n'):
        if 'Latest ' in raw_line:
            return raw_line.rsplit(None, 1)[-1].strip()
    return None
def query_package(module, name, state="present"):
    """Query the package status in both the local system and the repository.

    Returns a (installed, up_to_date) pair of booleans for
    state == "present"; returns None for any other state (no caller
    passes one).
    """
    if state != "present":
        return None
    pear = _get_pear_path(module)
    local_rc, _out, _err = module.run_command(
        "%s info %s" % (pear, name), check_rc=False)
    if local_rc != 0:
        # package is not installed locally
        return False, False
    remote_rc, remote_out, _err = module.run_command(
        "%s remote-info %s" % (pear, name), check_rc=False)
    # locally installed version (if any) and newest repository version,
    # both parsed from the same remote-info output
    local_version = get_local_version(remote_out)
    repo_version = get_repository_version(remote_out)
    if remote_rc == 0:
        # Installed; up-to-date exactly when the two versions match.
        return True, (local_version == repo_version)
    return False, False
def remove_packages(module, packages):
    """Uninstall every installed package in ``packages`` and exit the
    module, reporting whether anything changed."""
    removed = 0
    # One package at a time, so a failure can name the culprit.
    for name in packages:
        installed, _updated = query_package(module, name)
        if not installed:
            # nothing to do for packages that are already absent
            continue
        rc, _out, _err = module.run_command(
            "%s uninstall %s" % (_get_pear_path(module), name),
            check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (name))
        removed += 1
    if removed > 0:
        module.exit_json(changed=True, msg="removed %s package(s)" % removed)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, state, packages):
    """Install (state == 'present') or upgrade (state == 'latest') the given
    packages, then exit the module reporting whether anything changed."""
    install_c = 0
    # (dropped an unused enumerate() index from the original loop)
    for package in packages:
        # Skip when already installed and either state == 'present', or
        # state == 'latest' with the package already up-to-date.
        installed, updated = query_package(module, package)
        if installed and (state == 'present' or (state == 'latest' and updated)):
            continue
        # 'present' installs; anything else (i.e. 'latest') upgrades.
        # The original left `command` unbound for unexpected states.
        command = 'install' if state == 'present' else 'upgrade'
        cmd = "%s %s %s" % (_get_pear_path(module), command, package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to install %s" % (package))
        install_c += 1
    if install_c > 0:
        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
    module.exit_json(changed=False, msg="package(s) already installed")
def check_packages(module, packages, state):
    """Check-mode handler: report which packages would be changed to reach
    ``state`` and exit without modifying anything."""
    would_be_changed = []
    for package in packages:
        installed, updated = query_package(module, package)
        if ((state in ["present", "latest"] and not installed) or
                (state == "absent" and installed) or
                (state == "latest" and not updated)):
            would_be_changed.append(package)
    if would_be_changed:
        if state == "absent":
            # report the action ("removed"), not the target state
            state = "removed"
        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
            len(would_be_changed), state))
    else:
        # BUG FIX: the original passed change=False; the Ansible result
        # key is 'changed', so check-mode runs always reported changed.
        module.exit_json(changed=False, msg="package(s) already %s" % state)
def main():
    """Entry point: parse module parameters, normalize state aliases, and
    dispatch to the check/install/remove helpers (each of which exits)."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(aliases=['pkg']),
            state=dict(default='present',
                       choices=['present', 'installed', 'latest',
                                'absent', 'removed']),
            executable=dict(default=None, required=False, type='path')),
        required_one_of=[['name']],
        supports_check_mode=True)
    p = module.params
    # normalize the state parameter: 'installed' and 'removed' are aliases
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    elif p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'
    if p['name']:
        pkgs = p['name'].split(',')
        # (removed a dead loop that built an unused `pkg_files` list)
        if module.check_mode:
            # check_packages() calls exit_json(), so nothing below runs
            # in check mode.
            check_packages(module, pkgs, p['state'])
        if p['state'] in ['present', 'latest']:
            install_packages(module, p['state'], pkgs)
        elif p['state'] == 'absent':
            remove_packages(module, pkgs)


if __name__ == '__main__':
    main()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for ops used with embeddings."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import compat
def _AsLong(array):
"""Casts arrays elements to long type. Used to convert from numpy tf."""
return [int(x) for x in array]
class ScatterAddSubTest(test.TestCase):
  # Compares tf scatter_add / scatter_sub against an equivalent numpy update.

  def _TestCase(self, shape, indices, scatter_op=state_ops.scatter_add):
    """Run a random test case with the given shape and indices.

    Args:
      shape: Shape of the parameters array.
      indices: One-dimensional array of ints, the indices of the last dimension
        of the parameters to update.
      scatter_op: ScatterAdd or ScatterSub.
    """
    # NOTE(review): calling setUp() from this helper (rather than overriding
    # setUp itself) looks accidental -- confirm against TestCase's contract.
    super(ScatterAddSubTest, self).setUp()
    with self.test_session(use_gpu=False):
      # Create a random parameter array of given shape
      p_init = np.random.rand(*shape).astype("f")
      # Create the shape of the update array. All dimensions except the last
      # match the parameter array, the last dimension equals the # of indices.
      vals_shape = [len(indices)] + shape[1:]
      vals_init = np.random.rand(*vals_shape).astype("f")
      v_i = [float(x) for x in vals_init.ravel()]
      p = variables.Variable(p_init)
      vals = constant_op.constant(v_i, shape=vals_shape, name="vals")
      ind = constant_op.constant(indices, dtype=dtypes.int32)
      p2 = scatter_op(p, ind, vals, name="updated_p")
      # p = init
      variables.global_variables_initializer().run()
      # p += vals
      result = p2.eval()
      # Compute the expected 'p' using numpy operations.
      for i, ind in enumerate(indices):
        if scatter_op == state_ops.scatter_add:
          p_init.reshape(shape[0], -1)[ind, :] += (
              vals_init.reshape(vals_shape[0], -1)[i, :])
        else:
          p_init.reshape(shape[0], -1)[ind, :] -= (
              vals_init.reshape(vals_shape[0], -1)[i, :])
      self.assertTrue(all((p_init == result).ravel()))

  def testNoRepetitions(self):
    self._TestCase([2, 2], [1])
    self._TestCase([4, 4, 4], [2, 0])
    self._TestCase([43, 20, 10, 10], [42, 5, 6, 1, 3, 5, 7, 9])

  def testWithRepetitions(self):
    self._TestCase([2, 2], [1, 1])
    self._TestCase([5, 3, 9, 5], [2, 0, 4, 1, 3, 1, 4, 0, 4, 3])
    self._TestCase([32, 4, 4], [31] * 8)

  def testRandom(self):
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices))

  def testSubRandom(self):
    # Random shapes of rank 4, random indices
    for _ in range(5):
      shape = np.random.randint(1, 20, size=4)
      indices = np.random.randint(shape[0], size=2 * shape[0])
      self._TestCase(_AsLong(list(shape)), list(indices), state_ops.scatter_sub)

  def testWrongShape(self):
    # Indices and values mismatch.
    var = variables.Variable(
        array_ops.zeros(
            shape=[1024, 64, 64], dtype=dtypes.float32))
    indices = array_ops.placeholder(dtypes.int32, shape=[32])
    values = array_ops.placeholder(dtypes.float32, shape=[33, 64, 64])
    with self.assertRaises(ValueError):
      state_ops.scatter_add(var, indices, values)
    # Var and values mismatch.
    values = array_ops.placeholder(dtypes.float32, shape=[32, 64, 63])
    with self.assertRaises(ValueError):
      state_ops.scatter_add(var, indices, values)
def _PName(param_id):
return "p" + str(param_id)
def _EmbeddingParams(num_shards,
                     vocab_size,
                     dtype=dtypes.float32,
                     shape=None,
                     use_shapeless_placeholder=False):
  """Build sharded embedding parameters plus matching random numpy values.

  Returns a (tensors, params, feed_dict) triple: one placeholder/constant
  per shard, a dict mapping "<shard name>:0" to that shard's numpy array,
  and a feed_dict supplying those arrays.
  """
  shape = shape or [10]
  tensors = []
  params = {}
  feed_dict = {}
  base_rows, excess = vocab_size // num_shards, vocab_size % num_shards
  for shard in range(num_shards):
    # Excess rows go evenly on the first `excess` shards.
    shard_shape = [base_rows + (1 if shard < excess else 0)] + shape
    param_name = _PName(shard)
    if use_shapeless_placeholder:
      param = array_ops.placeholder(dtype, shape=None, name=param_name)
    else:
      param = constant_op.constant(
          1.0, shape=shard_shape, dtype=dtype, name=param_name)
    tensors.append(param)
    np_type = "f" if dtype == dtypes.float32 else "d"
    val = (np.random.rand(*shard_shape).astype(np_type)) + 1
    params[param_name + ":0"] = val
    feed_dict[param.name] = val
  return tensors, params, feed_dict
def _EmbeddingParamsAsPartitionedVariable(num_shards,
                                          vocab_size,
                                          dtype=dtypes.float32,
                                          shape=None):
  """Like _EmbeddingParams, additionally returning a PartitionedVariable
  initialized with the concatenation of all shard values."""
  p, params, feed_dict = _EmbeddingParams(
      num_shards, vocab_size, dtype=dtype, shape=shape)
  full_shape = [vocab_size] + (shape or [10])
  partitioned_variable = variable_scope.get_variable(
      "p",
      shape=full_shape,
      initializer=array_ops.concat([params[p_i.name] for p_i in p], 0),
      partitioner=partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_shards, min_slice_size=1))
  return p, partitioned_variable, params, feed_dict
def _EmbeddingResult(params,
                     id_vals,
                     num_shards,
                     vocab_size,
                     partition_strategy="mod",
                     weight_vals=None):
  """Numpy reference for embedding_lookup(_sparse).

  Returns three arrays: the aggregated (weighted) embedding value, the sum
  of weights, and the sum of squared weights, one entry per element of
  `id_vals`.
  """
  if weight_vals is None:
    # Unweighted lookup: every id counts with weight 1.
    weight_vals = np.copy(id_vals)
    weight_vals.fill(1)
  values = []
  weights = []
  weights_squared = []
  for ids, wts in zip(id_vals, weight_vals):
    value_aggregation = None
    weight_aggregation = None
    squared_weight_aggregation = None
    if isinstance(ids, compat.integral_types):
      # A scalar id: promote it (and its weight) to a singleton group.
      ids = [ids]
      wts = [wts]
    for i, weight_value in zip(ids, wts):
      if partition_strategy == "mod":
        # "mod" sharding: shard = id % num_shards, row = id // num_shards.
        val = np.copy(params[_PName(i % num_shards) + ":0"][
            i // num_shards, :]) * weight_value
      elif partition_strategy == "div":
        # "div" sharding: contiguous blocks; the first `extras` shards
        # hold one extra row each.
        ids_per_partition, extras = divmod(vocab_size, num_shards)
        threshold = extras * (ids_per_partition + 1)
        if i < threshold:
          partition = i // (ids_per_partition + 1)
          offset = i % (ids_per_partition + 1)
        else:
          partition = extras + (i - threshold) // ids_per_partition
          offset = (i - threshold) % ids_per_partition
        val = np.copy(params[_PName(partition) + ":0"][
            offset, :]) * weight_value
      else:
        # only "mod" and "div" strategies are supported
        assert False
      if value_aggregation is None:
        assert weight_aggregation is None
        assert squared_weight_aggregation is None
        value_aggregation = val
        weight_aggregation = weight_value
        squared_weight_aggregation = weight_value * weight_value
      else:
        assert weight_aggregation is not None
        assert squared_weight_aggregation is not None
        value_aggregation += val
        weight_aggregation += weight_value
        squared_weight_aggregation += weight_value * weight_value
    values.append(value_aggregation)
    weights.append(weight_aggregation)
    weights_squared.append(squared_weight_aggregation)
  values = np.array(values).astype(np.float32)
  weights = np.array(weights).astype(np.float32)
  weights_squared = np.array(weights_squared).astype(np.float32)
  return values, weights, weights_squared
class EmbeddingLookupTest(test.TestCase):
  # Covers embedding_lookup over lists of tensors and PartitionedVariables,
  # with "mod" and "div" partition strategies, max_norm clipping, gradient
  # checks, and higher-rank id tensors.

  # This test looks up [0, 0] in a parameter matrix sharded 2 ways. Since
  # both the ids are in the first shard, one of the resulting lookup
  # vector is going to be empty. The subsequent DivOp fails because of that.
  # TODO(keveman): Disabling the test until the underlying problem is fixed.
  def testSimpleSharded(self):
    with self.test_session():
      num_shards = 2
      vocab_size = 4
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
      id_vals = np.array([0, 0])
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
      print("Construct ids", ids.get_shape())
      embedding = embedding_ops.embedding_lookup(p, ids)
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)

  def testMaxNorm(self):
    # A single embedding [2.0] clipped with max_norm=1.0 comes back as [1.0].
    with self.test_session():
      embeddings = constant_op.constant([[2.0]])
      ids = constant_op.constant([0], dtype=dtypes.int32)
      embedding = embedding_ops.embedding_lookup(
          [embeddings], ids, max_norm=1.0)
      self.assertAllEqual(embedding.eval(), [[1.0]])

  def testMaxNormNontrivial(self):
    with self.test_session():
      embeddings = constant_op.constant([[2.0, 4.0], [3.0, 1.0]])
      ids = constant_op.constant([0, 1], dtype=dtypes.int32)
      embedding = embedding_ops.embedding_lookup(
          [embeddings], ids, max_norm=2.0)
      # Expected result: each row normalized to unit L2 norm, scaled by 2.
      norms = math_ops.sqrt(
          math_ops.reduce_sum(
              embeddings * embeddings, axis=1))
      normalized = embeddings / array_ops.stack([norms, norms], axis=1)
      self.assertAllEqual(embedding.eval(), 2 * normalized.eval())

  def testSimpleShardedPartitionedVariable(self):
    with self.test_session() as sess:
      num_shards = 2
      vocab_size = 4
      p, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
          num_shards, vocab_size)
      id_vals = np.array([0, 0])
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
      print("Construct ids", ids.get_shape())
      embedding = embedding_ops.embedding_lookup(p_variable, ids)
      variables.global_variables_initializer().run()
      params_values = [params[p_i.name] for p_i in p]
      # Test that the PartitionedVariable components equal the list in p
      p_var_val = sess.run(list(p_variable))
      # Actual test
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
      self.assertAllEqual(params_values, p_var_val)
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)

  def testShardedModPartitioningInt32Ids(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
      embedding = embedding_ops.embedding_lookup(p, ids)
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)

  def testShardedModPartitioningInt64Ids(self):
    # Same as the int32 variant above, but with int64 ids.
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
      embedding = embedding_ops.embedding_lookup(p, ids)
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(params, id_vals, num_shards, vocab_size)
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningInt32Ids(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
      embedding = embedding_ops.embedding_lookup(
          p, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(
          params, id_vals, num_shards, vocab_size, partition_strategy="div")
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningInt32IdsPartitionedVariable(self):
    # "div" strategy with the params held in a PartitionedVariable.
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      _, p_variable, params, feed_dict = _EmbeddingParamsAsPartitionedVariable(
          num_shards, vocab_size)
      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int32)
      variables.global_variables_initializer().run()
      embedding = embedding_ops.embedding_lookup(
          p_variable, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(
          params, id_vals, num_shards, vocab_size, partition_strategy="div")
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningInt64Ids(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      p, params, feed_dict = _EmbeddingParams(num_shards, vocab_size)
      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
      embedding = embedding_ops.embedding_lookup(
          p, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(
          params, id_vals, num_shards, vocab_size, partition_strategy="div")
      self.assertAllEqual(np_result, tf_result)
      self.assertShapeEqual(np_result, embedding)

  def testShardedDivPartitioningUnknownParamShape(self):
    with self.test_session():
      num_shards = 5
      vocab_size = 13
      # Embedding dimensions is 10. The vocab_size x 10 embedding
      # parameters are spread in num_shards matrices, so the first
      # 3 shards are 3 x 10 and the last 2 shards are 2 x 10.
      # We clear parameter shapes, to test when shape is not statically known.
      p, params, feed_dict = _EmbeddingParams(
          num_shards, vocab_size, use_shapeless_placeholder=True)
      num_vals = 30
      # Fetch num_vals embeddings for random word ids. Since
      # num_vals > vocab_size, this ought to have repetitions, so
      # will test that aspect.
      id_vals = np.random.randint(vocab_size, size=num_vals)
      ids = constant_op.constant(list(id_vals), dtype=dtypes.int64)
      embedding = embedding_ops.embedding_lookup(
          p, ids, partition_strategy="div")
      tf_result = embedding.eval(feed_dict=feed_dict)
      np_result, _, _ = _EmbeddingResult(
          params, id_vals, num_shards, vocab_size, partition_strategy="div")
      self.assertAllEqual(np_result, tf_result)

  def testGradientsEmbeddingLookup(self):
    # Numeric gradient check of embedding_lookup over several id shapes
    # and shard counts.
    vocab_size = 9
    num_ids = 10
    id_vals = list(np.random.randint(vocab_size, size=num_ids))
    tf_logging.vlog(1, id_vals)
    for ids_shape in [(10,), (2, 5)]:
      for num_shards in [1, 3]:
        with self.test_session():
          ids = constant_op.constant(
              id_vals, shape=ids_shape, dtype=dtypes.int32)
          x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
          y = embedding_ops.embedding_lookup(x, ids)
          y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
          x_name = [_PName(i) for i in range(num_shards)]
          x_init_value = [params[x_n + ":0"] for x_n in x_name]
          x_shape = [i.shape for i in x_init_value]
          err = gradient_checker.compute_gradient_error(
              x, x_shape, y, y_shape, x_init_value=x_init_value)
          self.assertLess(err, 1e-4)

  def testGradientsEmbeddingLookupWithComputedParams(self):
    vocab_size = 9
    num_ids = 5
    id_vals = list(np.random.randint(vocab_size, size=num_ids))
    tf_logging.vlog(1, id_vals)
    for num_shards in [1, 3]:
      with self.test_session():
        ids = constant_op.constant(id_vals, dtype=dtypes.int32)
        x, params, _ = _EmbeddingParams(num_shards, vocab_size, shape=[2])
        # This will force a conversion from IndexedSlices to Tensor.
        x_squared = [math_ops.square(elem) for elem in x]
        y = embedding_ops.embedding_lookup(x_squared, ids)
        y_shape = [num_ids] + list(params[_PName(0) + ":0"].shape[1:])
        x_name = [_PName(i) for i in range(num_shards)]
        x_init_value = [params[x_n + ":0"] for x_n in x_name]
        x_shape = [i.shape for i in x_init_value]
        err = gradient_checker.compute_gradient_error(
            x, x_shape, y, y_shape, x_init_value=x_init_value)
        self.assertLess(err, 1e-3)

  def testConstructionNonSharded(self):
    # Graph-construction only; no session run.
    with ops.Graph().as_default():
      p = variables.Variable(
          array_ops.zeros(
              shape=[100, 100], dtype=dtypes.float32))
      ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
      embedding_ops.embedding_lookup([p], ids)

  def testConstructionSharded(self):
    # Graph-construction only; no session run.
    with ops.Graph().as_default():
      p = []
      for _ in range(2):
        p += [
            variables.Variable(
                array_ops.zeros(
                    shape=[100, 100], dtype=dtypes.float32))
        ]
      ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
      embedding_ops.embedding_lookup(p, ids)

  def testHigherRank(self):
    np.random.seed(8)
    with self.test_session():
      for params_shape in (12,), (6, 3):
        params = np.random.randn(*params_shape)
        for ids_shape in (3, 2), (4, 3):
          ids = np.random.randint(
              params.shape[0], size=np.prod(ids_shape)).reshape(ids_shape)
          # Compare nonsharded to gather
          simple = embedding_ops.embedding_lookup(params, ids).eval()
          self.assertAllEqual(simple, array_ops.gather(params, ids).eval())
          # Run a few random sharded versions
          for procs in 1, 2, 3:
            stride = procs * math_ops.range(params.shape[0] // procs)
            split_params = [
                array_ops.gather(params, stride + p) for p in xrange(procs)
            ]
            sharded = embedding_ops.embedding_lookup(split_params, ids).eval()
            self.assertAllEqual(simple, sharded)
class EmbeddingLookupSparseTest(test.TestCase):
def _RandomIdsAndWeights(self, batch_size, vocab_size):
max_val_per_entry = 6
vals_per_batch_entry = np.random.randint(
1, max_val_per_entry, size=batch_size)
num_vals = np.sum(vals_per_batch_entry)
ids = np.random.randint(vocab_size, size=num_vals)
weights = 1 + np.random.rand(num_vals)
indices = []
for batch_entry, num_val in enumerate(vals_per_batch_entry):
for val_index in range(num_val):
indices.append([batch_entry, val_index])
shape = [batch_size, max_val_per_entry]
sp_ids = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(ids, dtypes.int32),
constant_op.constant(shape, dtypes.int64))
sp_weights = sparse_tensor.SparseTensor(
constant_op.constant(indices, dtypes.int64),
constant_op.constant(weights, dtypes.float32),
constant_op.constant(shape, dtypes.int64))
return sp_ids, sp_weights, ids, weights, vals_per_batch_entry
def _GroupByBatchEntry(self, vals, vals_per_batch_entry):
grouped_vals = []
index = 0
for num_val in vals_per_batch_entry:
grouped_vals.append(list(vals[index:(index + num_val)]))
index += num_val
return grouped_vals
def testEmbeddingLookupSparse(self):
vocab_size = 13
batch_size = 10
param_shape = [2, 5]
expected_lookup_result_shape = [None] + param_shape
sp_ids, sp_weights, ids, weights, vals_per_batch_entry = (
self._RandomIdsAndWeights(batch_size, vocab_size))
grouped_ids = self._GroupByBatchEntry(ids, vals_per_batch_entry)
grouped_weights = self._GroupByBatchEntry(weights, vals_per_batch_entry)
grouped_ignored_weights = self._GroupByBatchEntry(
np.ones(np.sum(vals_per_batch_entry)), vals_per_batch_entry)
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 5], ["sum", "mean", "sqrtn"], [dtypes.float32, dtypes.float64],
[True, False]):
with self.test_session():
p, params, feed_dict = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
embedding_sum = embedding_ops.embedding_lookup_sparse(
p,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
self.assertEqual(embedding_sum.get_shape().as_list(),
expected_lookup_result_shape)
tf_embedding_sum = embedding_sum.eval(feed_dict=feed_dict)
np_embedding_sum, np_weight_sum, np_weight_sq_sum = _EmbeddingResult(
params,
grouped_ids,
num_shards,
vocab_size,
weight_vals=grouped_ignored_weights if ignore_weights else
grouped_weights)
if combiner == "mean":
np_embedding_sum /= np.reshape(np_weight_sum, (batch_size, 1, 1))
if combiner == "sqrtn":
np_embedding_sum /= np.reshape(
np.sqrt(np_weight_sq_sum), (batch_size, 1, 1))
self.assertAllClose(np_embedding_sum, tf_embedding_sum)
def testGradientsEmbeddingLookupSparse(self):
vocab_size = 12
batch_size = 4
param_shape = [2, 3]
sp_ids, sp_weights, _, _, _ = (
self._RandomIdsAndWeights(batch_size, vocab_size))
for num_shards, combiner, dtype, ignore_weights in itertools.product(
[1, 3], ["sum", "mean", "sqrtn"], [dtypes.float32, dtypes.float64],
[True, False]):
with self.test_session():
x, params, _ = _EmbeddingParams(
num_shards, vocab_size, shape=param_shape, dtype=dtype)
y = embedding_ops.embedding_lookup_sparse(
x,
sp_ids,
None if ignore_weights else sp_weights,
combiner=combiner)
x_name = [_PName(i) for i in range(num_shards)]
x_init_value = [params[x_n + ":0"] for x_n in x_name]
x_shape = [i.shape for i in x_init_value]
y_shape = [batch_size] + list(params[_PName(0) + ":0"].shape[1:])
err = gradient_checker.compute_gradient_error(
x, x_shape, y, y_shape, x_init_value=x_init_value)
self.assertLess(err, 1e-5 if dtype == dtypes.float64 else 2e-3)
  def testIncompatibleShapes(self):
    """embedding_lookup_sparse must reject ids/weights whose dense shapes
    disagree (ids are 2x2 here, weights are 1x2)."""
    with self.test_session():
      x, _, _ = _EmbeddingParams(1, 10, dtype=dtypes.float32)
      sp_ids = sparse_tensor.SparseTensor(
          constant_op.constant([[0, 0], [0, 1], [1, 0]], dtypes.int64),
          constant_op.constant([0, 1, 2], dtypes.int32),
          constant_op.constant([2, 2], dtypes.int64))
      # Deliberately mismatched dense_shape ([1, 2] vs. [2, 2] above).
      sp_weights = sparse_tensor.SparseTensor(
          constant_op.constant([[0, 0], [0, 1]], dtypes.int64),
          constant_op.constant([12.0, 5.0], dtypes.float32),
          constant_op.constant([1, 2], dtypes.int64))
      with self.assertRaises(ValueError):
        embedding_ops.embedding_lookup_sparse(
            x, sp_ids, sp_weights, combiner="mean")
class DynamicStitchOpTest(test.TestCase):
  """Tests for data_flow_ops.dynamic_stitch."""

  def _CheckSimpleStitch(self, use_gpu):
    # Two overlapping index sets; the later value list wins at indices 2, 3.
    with self.test_session(use_gpu=use_gpu):
      indices = [
          ops.convert_to_tensor([0, 1, 2]), ops.convert_to_tensor([2, 3])
      ]
      values = [
          ops.convert_to_tensor([12, 23, 34]), ops.convert_to_tensor([1, 2])
      ]
      self.assertAllEqual(
          data_flow_ops.dynamic_stitch(indices, values).eval(), [12, 23, 1, 2])

  def testCint32Cpu(self):
    self._CheckSimpleStitch(use_gpu=False)

  def testCint32Gpu(self):
    self._CheckSimpleStitch(use_gpu=True)

  def testInt32Cpu(self):
    self._CheckSimpleStitch(use_gpu=False)

  def testInt32Gpu(self):
    self._CheckSimpleStitch(use_gpu=True)

  def testSumGradArgs(self):
    # Indices 2 and 3 are written twice; the later slice overwrites them.
    with self.test_session(use_gpu=False):
      indices = [
          ops.convert_to_tensor([0, 1, 2, 3]), ops.convert_to_tensor([2, 3])
      ]
      values = [
          ops.convert_to_tensor([2, 3, 5, 7]), ops.convert_to_tensor([1, 1])
      ]
      self.assertAllEqual(
          data_flow_ops.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])

  # We expect that the values are merged in order.
  def testStitchOrder(self):
    with self.test_session():
      indices = []
      np_values = []
      values = []
      for _ in range(10):
        indices.append(ops.convert_to_tensor(np.arange(100).astype(np.int32)))
        np_values.append(np.random.uniform(size=100))
        values.append(ops.convert_to_tensor(np_values[-1]))
      stitched = data_flow_ops.dynamic_stitch(indices, values).eval()
      # Every slice covers all 100 indices, so the last one must win.
      self.assertAllEqual(np_values[-1], stitched)
# Run the whole test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| |
from nose.tools import *
from framework.auth import Auth
from tests.base import ApiTestCase
from tests.factories import InstitutionFactory, AuthUserFactory, NodeFactory
from api.base.settings.defaults import API_BASE
class TestNodeRelationshipInstitutions(ApiTestCase):
    """Functional tests for /nodes/<id>/relationships/institutions/.

    Covers permission checks and GET/POST/PUT/PATCH/DELETE semantics for
    attaching and detaching institutions on a node.
    """

    def setUp(self):
        super(TestNodeRelationshipInstitutions, self).setUp()
        # A user affiliated with two institutions who administers one node.
        self.user = AuthUserFactory()
        self.institution1 = InstitutionFactory()
        self.institution2 = InstitutionFactory()
        self.user.affiliated_institutions.append(self.institution1)
        self.user.affiliated_institutions.append(self.institution2)
        self.user.save()
        self.node = NodeFactory(creator=self.user)
        self.node_institutions_url = '/{0}nodes/{1}/relationships/institutions/'.format(API_BASE, self.node._id)

    def create_payload(self, *institution_ids):
        """Build a JSON-API relationship payload for the given institution ids."""
        data = []
        for id_ in institution_ids:
            data.append({'type': 'institutions', 'id': id_})
        return {'data': data}

    def test_node_with_no_permissions(self):
        """A non-contributor gets 403, even if affiliated with the institution."""
        user = AuthUserFactory()
        user.affiliated_institutions.append(self.institution1)
        user.save()
        res = self.app.put_json_api(
            self.node_institutions_url,
            # Fixed: pass the id itself; the old code wrapped it in a list,
            # producing a malformed {'id': [<id>]} entry in the payload.
            self.create_payload(self.institution1._id),
            auth=user.auth,
            expect_errors=True,
        )
        assert_equal(res.status_code, 403)

    def test_user_with_no_institution(self):
        """An admin with no institutional affiliation cannot attach one (403)."""
        user = AuthUserFactory()
        node = NodeFactory(creator=user)
        res = self.app.put_json_api(
            '/{0}nodes/{1}/relationships/institutions/'.format(API_BASE, node._id),
            self.create_payload(self.institution1._id),
            expect_errors=True,
            auth=user.auth
        )
        assert_equal(res.status_code, 403)

    def test_get_public_node(self):
        """Anyone may read the relationship of a public node."""
        self.node.is_public = True
        self.node.save()
        res = self.app.get(
            self.node_institutions_url
        )
        assert_equal(res.status_code, 200)
        assert_equal(res.json['data'], [])

    def test_institution_does_not_exist(self):
        """Unknown institution ids yield 404."""
        res = self.app.put_json_api(
            self.node_institutions_url,
            self.create_payload('not_an_id'),
            expect_errors=True,
            auth=self.user.auth
        )
        assert_equal(res.status_code, 404)

    def test_wrong_type(self):
        """A payload with the wrong JSON-API type yields 409."""
        res = self.app.put_json_api(
            self.node_institutions_url,
            {'data': [{'type': 'not_institution', 'id': self.institution1._id}]},
            expect_errors=True,
            auth=self.user.auth
        )
        assert_equal(res.status_code, 409)

    def test_user_with_institution_and_permissions(self):
        """PUT by an affiliated admin attaches both institutions (200)."""
        assert_not_in(self.institution1, self.node.affiliated_institutions)
        assert_not_in(self.institution2, self.node.affiliated_institutions)
        res = self.app.put_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id, self.institution2._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 200)
        data = res.json['data']
        ret_institutions = [inst['id'] for inst in data]
        assert_in(self.institution1._id, ret_institutions)
        assert_in(self.institution2._id, ret_institutions)
        self.node.reload()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_in(self.institution2, self.node.affiliated_institutions)

    def test_user_with_institution_and_permissions_through_post(self):
        """POST by an affiliated admin attaches both institutions (201).

        NOTE: the original file defined
        ``test_user_with_institution_and_permissions_through_patch`` twice;
        the first definition (expecting 200) was shadowed dead code and has
        been removed. The surviving test is renamed to match the POST it
        actually issues.
        """
        assert_not_in(self.institution1, self.node.affiliated_institutions)
        assert_not_in(self.institution2, self.node.affiliated_institutions)
        res = self.app.post_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id, self.institution2._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 201)
        data = res.json['data']
        ret_institutions = [inst['id'] for inst in data]
        assert_in(self.institution1._id, ret_institutions)
        assert_in(self.institution2._id, ret_institutions)
        self.node.reload()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_in(self.institution2, self.node.affiliated_institutions)

    def test_remove_institutions_with_no_permissions(self):
        """Unauthenticated PUT yields 401."""
        res = self.app.put_json_api(
            self.node_institutions_url,
            self.create_payload(),
            expect_errors=True
        )
        assert_equal(res.status_code, 401)

    def test_remove_institutions_with_affiliated_user(self):
        """PUT with an empty data list clears the relationship."""
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        assert_in(self.institution1, self.node.affiliated_institutions)
        res = self.app.put_json_api(
            self.node_institutions_url,
            {'data': []},
            auth=self.user.auth
        )
        assert_equal(res.status_code, 200)
        self.node.reload()
        assert_equal(self.node.affiliated_institutions, [])

    def test_using_post_making_no_changes_returns_204(self):
        """POST that adds nothing new is a no-op (204)."""
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        assert_in(self.institution1, self.node.affiliated_institutions)
        res = self.app.post_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 204)
        self.node.reload()
        assert_in(self.institution1, self.node.affiliated_institutions)

    def test_put_not_admin_but_affiliated(self):
        """A non-admin contributor cannot PUT; the node stays unchanged."""
        user = AuthUserFactory()
        user.affiliated_institutions.append(self.institution1)
        user.save()
        self.node.add_contributor(user, auth=Auth(self.user))
        self.node.save()
        res = self.app.put_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id),
            auth=user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)
        assert_equal(self.node.affiliated_institutions, [])

    def test_retrieve_private_node_no_auth(self):
        """Reading a private node's relationship without auth yields 401."""
        res = self.app.get(self.node_institutions_url, expect_errors=True)
        assert_equal(res.status_code, 401)

    def test_add_through_patch_one_inst_to_node_with_inst(self):
        """PATCH that repeats the existing institution and adds another."""
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_not_in(self.institution2, self.node.affiliated_institutions)
        res = self.app.patch_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id, self.institution2._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 200)
        self.node.reload()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_in(self.institution2, self.node.affiliated_institutions)

    def test_add_through_patch_one_inst_while_removing_other(self):
        """PATCH replaces the relationship wholesale."""
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_not_in(self.institution2, self.node.affiliated_institutions)
        res = self.app.patch_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution2._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 200)
        self.node.reload()
        assert_not_in(self.institution1, self.node.affiliated_institutions)
        assert_in(self.institution2, self.node.affiliated_institutions)

    def test_add_one_inst_with_post_to_node_with_inst(self):
        """POST appends without removing existing institutions (201)."""
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_not_in(self.institution2, self.node.affiliated_institutions)
        res = self.app.post_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution2._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 201)
        self.node.reload()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_in(self.institution2, self.node.affiliated_institutions)

    def test_delete_nothing(self):
        """DELETE with an empty payload is a no-op (204)."""
        res = self.app.delete_json_api(
            self.node_institutions_url,
            self.create_payload(),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 204)

    def test_delete_existing_inst(self):
        """DELETE detaches an attached institution (204)."""
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        assert_in(self.institution1, self.node.affiliated_institutions)
        res = self.app.delete_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id),
            auth=self.user.auth
        )
        assert_equal(res.status_code, 204)
        self.node.reload()
        assert_not_in(self.institution1, self.node.affiliated_institutions)

    def test_delete_not_affiliated_and_affiliated_insts(self):
        """DELETE tolerates ids that are not currently attached (204)."""
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        assert_in(self.institution1, self.node.affiliated_institutions)
        assert_not_in(self.institution2, self.node.affiliated_institutions)
        res = self.app.delete_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id, self.institution2._id),
            auth=self.user.auth,
        )
        assert_equal(res.status_code, 204)
        self.node.reload()
        assert_not_in(self.institution1, self.node.affiliated_institutions)
        assert_not_in(self.institution2, self.node.affiliated_institutions)

    def test_delete_user_is_not_admin(self):
        """A non-admin cannot DELETE (403)."""
        user = AuthUserFactory()
        user.affiliated_institutions.append(self.institution1)
        user.save()
        self.node.affiliated_institutions.append(self.institution1)
        self.node.save()
        res = self.app.delete_json_api(
            self.node_institutions_url,
            self.create_payload(self.institution1._id),
            auth=user.auth,
            expect_errors=True
        )
        assert_equal(res.status_code, 403)

    def test_delete_user_is_admin_but_not_affiliated_with_inst(self):
        """An admin may detach an institution they are not affiliated with."""
        user = AuthUserFactory()
        node = NodeFactory(creator=user)
        node.affiliated_institutions.append(self.institution1)
        node.save()
        assert_in(self.institution1, node.affiliated_institutions)
        res = self.app.delete_json_api(
            '/{0}nodes/{1}/relationships/institutions/'.format(API_BASE, node._id),
            self.create_payload(self.institution1._id),
            auth=user.auth,
        )
        assert_equal(res.status_code, 204)
        node.reload()
        assert_not_in(self.institution1, node.affiliated_institutions)
| |
import regex as re
import itertools
from collections import defaultdict
def exact_barcode_filter(chunk, bc1, bc2, bc3, re_string=None):
    """Keep only the reads whose cell-barcode sections all appear in the
    corresponding whitelists (bc1, and bc2/bc3 when given)."""
    if not re_string:
        re_string = '(.*):CELL_(?P<CB>.*):UMI_(.*)\\n(.*)\\n\\+\\n(.*)\\n'
    pattern = re.compile(re_string)
    passed = []
    for entry in chunk:
        cell = pattern.search(entry).groupdict()['CB']
        second = third = None
        # Multi-part barcodes are dash-separated within the CELL_ field.
        if bc3:
            cell, second, third = cell.split("-")
        elif bc2:
            cell, second = cell.split("-")
        if cell not in bc1:
            continue
        if bc2 and second not in bc2:
            continue
        if bc3 and third not in bc3:
            continue
        passed.append(entry)
    return passed
def correcting_barcode_filter(chunk, bc1hash, bc2hash, bc3hash, re_string=None):
    """Error-correct each read's cell barcode via the supplied hashes.

    Reads whose barcode cannot be corrected unambiguously (hash lookup is
    falsy) are dropped; corrected reads have their CELL_ field rewritten.
    """
    if not re_string:
        re_string = '(.*):CELL_(?P<CB>.*):UMI_(.*)\\n(.*)\\n\\+\\n(.*)\\n'
    pattern = re.compile(re_string)
    passed = []
    for entry in chunk:
        observed = pattern.search(entry).groupdict()['CB']
        part1 = observed
        if bc3hash:
            part1, part2, part3 = observed.split("-")
        elif bc2hash:
            part1, part2 = observed.split("-")
        fixed1 = bc1hash[part1]
        if not fixed1:
            continue
        if bc2hash:
            fixed2 = bc2hash[part2]
            if not fixed2:
                continue
        if bc3hash:
            fixed3 = bc3hash[part3]
            if not fixed3:
                continue
        if bc3hash:
            corrected = fixed1 + "-" + fixed2 + "-" + fixed3
        elif bc2hash:
            corrected = fixed1 + "-" + fixed2
        else:
            corrected = fixed1
        if corrected != observed:
            entry = entry.replace("CELL_" + observed, "CELL_" + corrected)
        passed.append(entry)
    return passed
def exact_sample_filter2(chunk, barcodes):
    """Return the reads in *chunk* whose sample barcode is in *barcodes*."""
    pattern = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
    return [entry for entry in chunk
            if pattern.search(entry).groupdict()['SB'] in barcodes]
def correcting_sample_filter2(chunk, barcodehash):
    """Error-correct the sample barcode of each read via *barcodehash*;
    drop reads that cannot be corrected unambiguously."""
    pattern = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
    passed = []
    for entry in chunk:
        observed = pattern.search(entry).groupdict()['SB']
        fixed = barcodehash[observed]
        if not fixed:
            continue
        if fixed != observed:
            entry = entry.replace("SAMPLE_" + observed, "SAMPLE_" + fixed)
        passed.append(entry)
    return passed
def exact_sample_filter(read, barcodes):
    """Return *read* if its sample barcode is whitelisted, else None."""
    pattern = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
    sb = pattern.search(read).groupdict()['SB']
    return read if sb in barcodes else None
def umi_filter(chunk):
    """Drop reads whose UMI contains anything other than A, C, G or T."""
    pattern = re.compile('(.*):CELL_(.*):UMI_(?P<MB>.*):SAMPLE_(.*)\\n(.*)\\n\\+\\n(.*)\\n')
    # Inline of acgt_match(): a UMI passes iff no non-ACGT character occurs.
    non_acgt = re.compile(r'[^ACGT]').search
    passed = []
    for entry in chunk:
        umi = pattern.search(entry).groupdict()['MB']
        if not non_acgt(umi):
            passed.append(entry)
    return passed
def append_uids(chunk):
    """Tag each read with a ':UID_<sample><cell><umi>' field inserted right
    after its SAMPLE_ field."""
    pattern = re.compile('(.*):CELL_(?P<CB>.*):UMI_(?P<MB>.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
    tagged = []
    for entry in chunk:
        fields = pattern.search(entry).groupdict()
        marker = "SAMPLE_" + fields['SB']
        cut = entry.find(marker) + len(marker)
        uid = ":UID_" + fields['SB'] + fields['CB'] + fields['MB']
        tagged.append(entry[:cut] + uid + entry[cut:])
    return tagged
def correcting_sample_filter(read, barcodehash):
    """Error-correct a single read's sample barcode via *barcodehash*.

    Returns None when the barcode cannot be corrected unambiguously,
    otherwise the (possibly rewritten) read.
    """
    pattern = re.compile('(.*):CELL_(.*):UMI_(.*):SAMPLE_(?P<SB>.*)\\n(.*)\\n\\+\\n(.*)\\n')
    observed = pattern.search(read).groupdict()['SB']
    fixed = barcodehash[observed]
    if not fixed:
        return None
    if fixed == observed:
        return read
    return read.replace("SAMPLE_" + observed, "SAMPLE_" + fixed)
class MutationHash(object):
    """Maps a (possibly mutated) barcode to its unambiguous original.

    Lookup returns the single source string the barcode could have come
    from, or None when the barcode is ambiguous (multiple candidates).
    """

    def __init__(self, strings, nedit):
        # mutationhash maps every <=nedit-substitution variant to the set
        # of source strings it could have come from.
        self.hash = mutationhash(strings, nedit)

    def __getitem__(self, barcode):
        candidates = self.hash[barcode]
        if len(candidates) == 1:
            return next(iter(candidates))
        return None
def mutationhash(strings, nedit):
    """
    Build a hash whose keys are every nedit-distance substitution variant of
    the input strings; each value is the set of input strings that variant
    could have come from.
    """
    longest = max(len(s) for s in strings)
    edits = generate_idx(longest, nedit)
    lookup = defaultdict(set)
    for original in strings:
        # The untouched string always maps to itself.
        lookup[original].add(original)
        for variant in substitution_set(original, edits):
            lookup[variant].add(original)
    return lookup
def substitution_set(string, indexes):
    """Return every string reachable from *string* by applying one of the
    substitution specs in *indexes*, skipping out-of-bounds edits."""
    n = len(string)
    out = set()
    for edit in indexes:
        if valid_substitution(n, edit):
            out.add(mutate_string(string, edit))
    return out
def valid_substitution(strlen, index):
    """True when every substitution position in *index* lies inside a
    string of length *strlen*."""
    return all(pos < strlen for pos in index[0])
def generate_idx(maxlen, nedit):
    """
    Generate all possible nedit-substitution specs for a string of length
    maxlen. Each item has the form ((index1, index2), 'A', 'G') for nedit=2:
    index1 is replaced by 'A' and index2 by 'G'. Because a substitution may
    write the base already present, this also covers all edits < nedit.

    Returns a list of (positions, base1, ..., base_nedit) tuples.

    Fix: removed the unused ``indexlists`` local and the needless
    ``ALPHABETS`` comprehension (simple list repetition suffices).
    """
    alphabet = ["A", "C", "G", "T", "N"]
    # One independent alphabet choice per edited position.
    return list(itertools.product(
        itertools.combinations(range(maxlen), nedit),
        *([alphabet] * nedit)))
def acgt_match(string):
    """
    Return True if *string* consists only of "A", "C", "G", "T".
    """
    return re.search(r'[^ACGT]', string) is None
def mutate_string(string, tomutate):
    """Apply a substitution spec ((positions...), base1, base2, ...) to
    *string*: position k receives base k+1."""
    chars = list(string)
    for offset, pos in enumerate(tomutate[0], start=1):
        chars[pos] = tomutate[offset]
    return "".join(chars)
| |
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""
Create the CSV file
"""
import argparse
import csv
import logging
import os
# NOTE(review): `casalog` is not imported anywhere in this file — it is a
# global injected by the CASA (casapy) environment this script runs under;
# this line will NameError outside CASA.
casalog.filter('DEBUGGING')
logging.info('Starting logger for...')
LOG = logging.getLogger("create_csv_file")
class CreateCsv(object):
    """Walk a directory for CASA measurement sets (*.ms) and write per
    scan / spectral-window / channel visibility statistics to a CSV file.

    NOTE(review): this class is executed inside CASA (casapy); ``ms`` and
    ``visstat`` are globals injected by that environment, not imports.
    The ``keys()[0]`` and ``open(..., 'wb')`` usages below indicate it
    targets CASA's Python 2 interpreter.
    """
    def __init__(self, directory_to_scan, csv_filename):
        # Root directory searched recursively for measurement sets.
        self.directory_to_scan = directory_to_scan
        # Output CSV path; the writer is created in extract_statistics().
        self._csv_filename = csv_filename
        self._csv_writer = None
        # Cached ms.getspectralwindowinfo() for the set being processed.
        self._spectral_window_info = None
        self._observer = None
    def _extract_data(self, measurement_set):
        """Compute and write one CSV row per scan/window/channel of one set."""
        # Open the measurement set
        ms.open(measurement_set)
        self._spectral_window_info = ms.getspectralwindowinfo()
        scan_summary = ms.getscansummary()
        # Hour-angle and MJD time axes consumed by _get_hour_angle().
        data = ms.getdata(["axis_info","ha"], ifraxis=True)
        ms.close()
        # The target name is the first dot-separated piece of the file name.
        (head, observation_name) = os.path.split(measurement_set)
        elements = observation_name.split('.')
        target = elements[0]
        for scan_number in scan_summary.keys():
            # '0' appears to index the first sub-scan entry — TODO confirm.
            begin_time = scan_summary[scan_number]['0']['BeginTime']
            end_time = scan_summary[scan_number]['0']['EndTime']
            hour_angle_begin_time = self._get_hour_angle(begin_time, data)
            hour_angle_end_time = self._get_hour_angle(end_time, data)
            for spectral_window_number in self._spectral_window_info.keys():
                number_channels = self._spectral_window_info[spectral_window_number]['NumChan']
                for channel_number in range(0, number_channels):
                    # Statistics for one channel; flags are ignored.
                    vis_stats = visstat(
                        vis=measurement_set,
                        datacolumn='data',
                        scan=scan_number,
                        spw='{0}:{1}'.format(spectral_window_number, channel_number),
                        useflags=False,
                    )
                    if vis_stats is not None:
                        frequency = self._get_frequency(spectral_window_number, channel_number)
                        self._write_line(
                            target,
                            scan_number,
                            begin_time,
                            end_time,
                            hour_angle_begin_time,
                            hour_angle_end_time,
                            spectral_window_number,
                            channel_number,
                            frequency,
                            vis_stats
                        )
    def _write_line(self, observation_id, scan_number, begin_time, end_time, hour_angle_begin_time, hour_angle_end_time, spectral_window_number, channel_number, frequency, vis_stats):
        """Append one statistics row to the CSV (order matches the header
        written in extract_statistics())."""
        # visstat returns a dict with a single report entry; take it.
        # Python 2 only: dict.keys() is a subscriptable list there.
        result = vis_stats[vis_stats.keys()[0]]
        self._csv_writer.writerow([
            observation_id,
            scan_number,
            begin_time,
            end_time,
            hour_angle_begin_time,
            hour_angle_end_time,
            spectral_window_number,
            channel_number,
            frequency,
            '{0:.5f}'.format(result['max']),
            '{0:.5f}'.format(result['mean']),
            '{0:.5f}'.format(result['medabsdevmed']),
            '{0:.5f}'.format(result['median']),
            '{0:.5f}'.format(result['min']),
            '{0:.5f}'.format(result['npts']),
            '{0:.5f}'.format(result['quartile']),
            '{0:.5f}'.format(result['rms']),
            '{0:.5f}'.format(result['stddev']),
            '{0:.5f}'.format(result['sum']),
            '{0:.5f}'.format(result['sumsq']),
            '{0:.5f}'.format(result['var']),
        ])
    def _get_frequency(self, spectral_window_number, channel_number):
        """Return the frequency of a channel: first-channel frequency plus
        channel_number times the channel width."""
        spectral_window = self._spectral_window_info[spectral_window_number]
        frequency = spectral_window['Chan1Freq']
        width = spectral_window['ChanWidth']
        return frequency + channel_number * width
    def extract_statistics(self):
        """Entry point: write the CSV header, then process every measurement
        set found under directory_to_scan."""
        list_measurement_sets = self._find_measurement_sets(self.directory_to_scan)
        LOG.info('Measurement Sets: {0}'.format(list_measurement_sets))
        # Open the CSV file
        with open(self._csv_filename, 'wb') as csv_file:
            self._csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
            self._csv_writer.writerow([
                'target',
                'scan',
                'begin_time',
                'end_time',
                'begin_hour_angle',
                'end_hour_angle',
                'spectral_window',
                'channel',
                'frequency',
                'max',
                'mean',
                'medabsdevmed',
                'median',
                'min',
                'npts',
                'quartile',
                'rms',
                'stddev',
                'sum',
                'sumsq',
                'var',
            ])
            for measurement_set in list_measurement_sets:
                self._extract_data(measurement_set)
    def _find_measurement_sets(self, directory_to_scan):
        """Recursively collect paths ending in '.ms' under directory_to_scan.

        Directories named *.ms are treated as measurement sets and are not
        descended into.
        """
        list_measurement_sets = []
        for entry in os.listdir(directory_to_scan):
            full_pathname = os.path.join(directory_to_scan, entry)
            if entry.endswith('.ms'):
                list_measurement_sets.append(full_pathname)
            elif os.path.isdir(full_pathname):
                list_measurement_sets.extend(self._find_measurement_sets(full_pathname))
        return list_measurement_sets
    @staticmethod
    def _get_hour_angle(time, data):
        """Return the hour angle at the first MJD sample at or after *time*
        (*time* is in days, MJDseconds in seconds); falls back to the last
        sample when *time* is beyond the axis."""
        hour_angles = data['axis_info']['time_axis']['HA']
        mj_dates = data['axis_info']['time_axis']['MJDseconds']
        time_in_seconds = time * 3600 * 24
        count = 0
        for mjd in mj_dates:
            if time_in_seconds <= mjd:
                return hour_angles[count]
            count += 1
        return hour_angles[-1]
def parse_args():
    """
    This is called via Casa so we have to be a bit careful
    :return:
    """
    parser = argparse.ArgumentParser('Get the arguments')
    parser.add_argument('arguments', nargs='+', help='the arguments')
    # Flags Casa itself passes through; accepted so parsing does not fail.
    for passthrough_flag in ('--nologger', '--log2term'):
        parser.add_argument(passthrough_flag, action="store_true")
    parser.add_argument('--logfile')
    parser.add_argument('-c', '--call')
    return parser.parse_args()
if __name__ == "__main__":
    # Invoked via casapy: the first positional argument is the directory to
    # scan, the second is the output CSV filename.
    args = parse_args()
    create_csv = CreateCsv(args.arguments[0], args.arguments[1])
    create_csv.extract_statistics()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 13 16:29:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extensions: napoleon parses Google/NumPy style docstrings;
# sphinxcontrib.bibtex renders the bibliography files listed below.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'sphinx.ext.todo',
    'sphinx.ext.autosummary',
    'sphinx.ext.napoleon',
    'sphinxcontrib.bibtex',
    'sphinx.ext.viewcode']
# Bibliography files consumed by sphinxcontrib.bibtex.
bibtex_bibfiles = [
    'source/bibtex/ref.bib',
    'source/bibtex/cite.bib'
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
# Render .. todo:: directives in the built output.
todo_include_todos=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
Affiliation = u'Argonne National Laboratory'
project = u'TomoBank'
copyright = u'2016-2019, ' + Affiliation
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The short X.Y version, read from the repository-level VERSION file.
# Fix: use a context manager so the file handle is closed deterministically
# instead of relying on garbage collection (avoids a ResourceWarning).
with open(os.path.join('..', 'VERSION')) as _version_file:
    version = _version_file.read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Fallback theme; replaced below by sphinx_rtd_theme for local builds
# (Read the Docs applies its own theme automatically).
html_theme = 'default'
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# `project` is defined earlier in this conf.py (outside this chunk).
htmlhelp_basename = project+'doc'

# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index',
     project+'.tex',
     project+u' Documentation',
     Affiliation,'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index',project,
     project+u' Documentation',
     [Affiliation,],
     1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index',
     project,
     project+u' Documentation',
     Affiliation,
     project,
     'Scientific Data Exchange'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#ztexinfo_no_detailmenu = False
# picked from http://read-the-docs.readthedocs.org/en/latest/faq.html
class Mock(object):
    """Stub that stands in for heavyweight modules (e.g. numpy) so the
    documentation build can import code without those dependencies."""

    __all__ = []

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Any attribute lookup yields a fresh Mock, so chained access works.
        return Mock()

    def _mock_binop(self, other):
        # Arithmetic involving a Mock always produces another Mock.
        return Mock()

    __mul__ = _mock_binop
    __rmul__ = _mock_binop
    __pow__ = _mock_binop
    __div__ = _mock_binop   # Python 2 division hook; inert on Python 3.
    __add__ = _mock_binop
    __radd__ = _mock_binop
# Modules to replace with Mock for the docs build; each name is injected
# into sys.modules so subsequent imports resolve to the stub.
MOCK_MODULES = ['numpy']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
| |
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# $Id$
"""
fcgi - a FastCGI/WSGI gateway.
For more information about FastCGI, see <http://www.fastcgi.com/>.
For more information about the Web Server Gateway Interface, see
<http://www.python.org/peps/pep-0333.html>.
Example usage:
#!/usr/bin/env python
from myapplication import app # Assume app is your WSGI application object
from fcgi import WSGIServer
WSGIServer(app).run()
See the documentation for WSGIServer/Server for more information.
On most platforms, fcgi will fallback to regular CGI behavior if run in a
non-FastCGI context. If you want to force CGI behavior, set the environment
variable FCGI_FORCE_CGI to "Y" or "y".
"""
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision$'
import sys
import os
import signal
import struct
import cStringIO as StringIO
import select
import socket
import errno
import traceback
# Prefer real threads; fall back to the dummy (single-threaded) stand-ins
# so the rest of the module can use one uniform thread/threading API.
try:
    import thread
    import threading
    thread_available = True
except ImportError:
    import dummy_thread as thread
    import dummy_threading as threading
    thread_available = False

# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
if not hasattr(socket, 'SHUT_WR'):
    socket.SHUT_WR = 1
__all__ = ['WSGIServer']

# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0

# Every record starts with a fixed 8-byte header (see FCGI_Header below).
FCGI_HEADER_LEN = 8

FCGI_VERSION_1 = 1

# Record types.
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE

FCGI_NULL_REQUEST_ID = 0

# FCGI_BEGIN_REQUEST flag: keep the connection open after the request ends.
FCGI_KEEP_CONN = 1

# Roles a FastCGI app can play.
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3

# Protocol status codes sent in FCGI_END_REQUEST.
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3

# Management-record variable names (FCGI_GET_VALUES).
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'

# struct format strings for the fixed-size wire structures.
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'

FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
    import time

    # Set non-zero to write debug output to a file.
    DEBUG = 0
    DEBUGLOG = '/tmp/fcgi.log'

    def _debug(level, msg):
        # Append a timestamped line to DEBUGLOG when DEBUG >= level.
        if DEBUG < level:
            return

        try:
            f = open(DEBUGLOG, 'a')
            f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
            f.close()
        except:
            # Deliberate best-effort: debug logging must never take the
            # server down.
            pass
class InputStream(object):
    """
    File-like object representing FastCGI input streams (FCGI_STDIN and
    FCGI_DATA). Supports the minimum methods required by WSGI spec.

    Data arrives via add_data() (called by the owning Connection) and is
    consumed via read()/readline().  Incoming chunks are collected in
    _bufList and merged into _buf lazily, just before a read needs them.
    """
    def __init__(self, conn):
        self._conn = conn

        # See Server.
        self._shrinkThreshold = conn.server.inputStreamShrinkThreshold

        self._buf = ''
        self._bufList = []
        self._pos = 0 # Current read position.
        self._avail = 0 # Number of bytes currently available.

        self._eof = False # True when server has sent EOF notification.

    def _shrinkBuffer(self):
        """Gets rid of already read data (since we can't rewind)."""
        if self._pos >= self._shrinkThreshold:
            self._buf = self._buf[self._pos:]
            self._avail -= self._pos
            self._pos = 0

            assert self._avail >= 0

    def _waitForData(self):
        """Waits for more data to become available."""
        # Single-threaded variant: pump the Connection so it can process
        # the next incoming record (which may call add_data on us).
        self._conn.process_input()

    def read(self, n=-1):
        # Return up to n bytes (all remaining if n < 0), blocking via
        # _waitForData() until enough data or EOF arrives.
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            if n < 0 or (self._avail - self._pos) < n:
                # Not enough data available.
                if self._eof:
                    # And there's no more coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more data.
                    self._waitForData()
                    continue
            else:
                newPos = self._pos + n
                break
        # Merge buffer list, if necessary.
        if self._bufList:
            self._buf += ''.join(self._bufList)
            self._bufList = []
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readline(self, length=None):
        # Return one line (up to and including '\n'), optionally capped
        # at `length` bytes.
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            # Unfortunately, we need to merge the buffer list early.
            if self._bufList:
                self._buf += ''.join(self._bufList)
                self._bufList = []
            # Find newline.
            i = self._buf.find('\n', self._pos)
            if i < 0:
                # Not found?
                if self._eof:
                    # No more data coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more to come.
                    self._waitForData()
                    continue
            else:
                newPos = i + 1
                break
        if length is not None:
            if self._pos + length < newPos:
                newPos = self._pos + length
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readlines(self, sizehint=0):
        # Collect lines until EOF, or until at least `sizehint` bytes
        # have been gathered (0 means no limit).
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: yield one line per iteration.
        r = self.readline()
        if not r:
            raise StopIteration
        return r

    def add_data(self, data):
        # Called by the Connection: empty data marks end-of-stream,
        # otherwise queue the chunk for a later merge.
        if not data:
            self._eof = True
        else:
            self._bufList.append(data)
            self._avail += len(data)
class MultiplexedInputStream(InputStream):
    """
    A version of InputStream meant to be used with MultiplexedConnections.
    Assumes the MultiplexedConnection (the producer) and the Request
    (the consumer) are running in different threads.
    """
    def __init__(self, conn):
        super(MultiplexedInputStream, self).__init__(conn)

        # Arbitrates access to this InputStream (it's used simultaneously
        # by a Request and its owning Connection object).
        lock = threading.RLock()

        # Notifies Request thread that there is new data available.
        self._lock = threading.Condition(lock)

    def _waitForData(self):
        # Wait for notification from add_data().  The condition's lock is
        # already held here, because this is only reached from read()/
        # readline() below; wait() releases it while blocked.
        self._lock.wait()

    def read(self, n=-1):
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).read(n)
        finally:
            self._lock.release()

    def readline(self, length=None):
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).readline(length)
        finally:
            self._lock.release()

    def add_data(self, data):
        # Producer side: append under the lock, then wake the consumer.
        self._lock.acquire()
        try:
            super(MultiplexedInputStream, self).add_data(data)
            self._lock.notify()
        finally:
            self._lock.release()
class OutputStream(object):
    """
    FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
    write() or writelines() immediately result in Records being sent back
    to the server. Buffering should be done in a higher level!
    """
    def __init__(self, conn, req, type, buffered=False):
        self._conn = conn
        self._req = req
        self._type = type          # FCGI_STDOUT or FCGI_STDERR
        self._buffered = buffered
        self._bufList = [] # Used if buffered is True
        self.dataWritten = False   # True once any non-empty write happened
        self.closed = False

    def _write(self, data):
        # Split data into Records no larger than the server's maxwrite
        # (minus header overhead) and send each over the connection.
        length = len(data)
        while length:
            toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)

            rec = Record(self._type, self._req.requestId)
            rec.contentLength = toWrite
            rec.contentData = data[:toWrite]
            self._conn.writeRecord(rec)

            data = data[toWrite:]
            length -= toWrite

    def write(self, data):
        assert not self.closed

        if not data:
            return

        self.dataWritten = True

        if self._buffered:
            self._bufList.append(data)
        else:
            self._write(data)

    def writelines(self, lines):
        assert not self.closed

        for line in lines:
            self.write(line)

    def flush(self):
        # Only need to flush if this OutputStream is actually buffered.
        if self._buffered:
            data = ''.join(self._bufList)
            self._bufList = []
            self._write(data)

    # Though available, the following should NOT be called by WSGI apps.
    def close(self):
        """Sends end-of-stream notification, if necessary."""
        if not self.closed and self.dataWritten:
            self.flush()

            # A zero-length Record of this type marks end-of-stream.
            rec = Record(self._type, self._req.requestId)
            self._conn.writeRecord(rec)

            self.closed = True
class TeeOutputStream(object):
    """
    Fan-out wrapper over two or more file-like objects: every write and
    flush is forwarded to all of the underlying streams, so one logical
    write lands everywhere.
    """
    def __init__(self, streamList):
        self._streams = streamList

    def write(self, data):
        for stream in self._streams:
            stream.write(data)

    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)

    def flush(self):
        for stream in self._streams:
            stream.flush()
class StdoutWrapper(object):
    """
    Proxy around sys.stdout that remembers whether any non-empty data has
    been written (so the server can tell if an error page is still needed).
    All other attribute access is delegated to the wrapped file.
    """
    def __init__(self, stdout):
        self._file = stdout
        self.dataWritten = False

    def write(self, data):
        # Latch the flag on the first non-empty write; always pass through.
        self.dataWritten = self.dataWritten or bool(data)
        self._file.write(data)

    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)

    def __getattr__(self, name):
        # Everything we don't define ourselves comes from the real file.
        return getattr(self._file, name)
def decode_pair(s, pos=0):
    """
    Decodes a name/value pair.
    The number of bytes decoded as well as the name/value pair
    are returned.
    """
    # Decode the two length fields (name length, then value length).
    # A length with the high bit set uses the 4-byte form; otherwise it
    # is a single byte.
    lengths = []
    for _ in (0, 1):
        n = ord(s[pos])
        if n & 128:
            n = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
            pos += 4
        else:
            pos += 1
        lengths.append(n)
    nameLength, valueLength = lengths

    name = s[pos:pos+nameLength]
    pos += nameLength
    value = s[pos:pos+valueLength]
    pos += valueLength

    return (pos, (name, value))
def encode_pair(name, value):
    """
    Encodes a name/value pair.
    The encoded string is returned.

    Lengths under 128 use the one-byte form; longer lengths use the
    4-byte form with the high bit (0x80000000) set, per the FastCGI spec.
    """
    # NOTE: the 'L' long-literal suffix was dropped from 0x80000000 —
    # it is unnecessary in Python 2 (ints auto-promote to long) and a
    # SyntaxError in Python 3.  Behavior is unchanged.
    nameLength = len(name)
    if nameLength < 128:
        s = chr(nameLength)
    else:
        s = struct.pack('!L', nameLength | 0x80000000)

    valueLength = len(value)
    if valueLength < 128:
        s += chr(valueLength)
    else:
        s += struct.pack('!L', valueLength | 0x80000000)

    return s + name + value
class Record(object):
    """
    A FastCGI Record.
    Used for encoding/decoding records.

    A record on the wire is an 8-byte header (FCGI_Header), contentLength
    bytes of payload, then paddingLength bytes of padding.
    """
    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
        self.version = FCGI_VERSION_1
        self.type = type
        self.requestId = requestId
        self.contentLength = 0
        self.paddingLength = 0
        self.contentData = ''

    def _recvall(sock, length):
        """
        Attempts to receive length bytes from a socket, blocking if necessary.
        (Socket may be blocking or non-blocking.)

        Returns (data, bytesReceived); bytesReceived may be short on EOF.
        """
        dataList = []
        recvLen = 0
        while length:
            try:
                data = sock.recv(length)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait until readable.
                    select.select([sock], [], [])
                    continue
                else:
                    raise
            if not data: # EOF
                break
            dataList.append(data)
            dataLen = len(data)
            recvLen += dataLen
            length -= dataLen
        return ''.join(dataList), recvLen
    _recvall = staticmethod(_recvall)  # pre-decorator staticmethod idiom

    def read(self, sock):
        """Read and decode a Record from a socket.

        Raises EOFError on any short read or socket failure.
        """
        try:
            header, length = self._recvall(sock, FCGI_HEADER_LEN)
        except:
            raise EOFError

        if length < FCGI_HEADER_LEN:
            raise EOFError

        self.version, self.type, self.requestId, self.contentLength, \
                      self.paddingLength = struct.unpack(FCGI_Header, header)

        if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        if self.contentLength:
            try:
                self.contentData, length = self._recvall(sock,
                                                         self.contentLength)
            except:
                raise EOFError

            if length < self.contentLength:
                raise EOFError

        if self.paddingLength:
            # Padding bytes are read and discarded.
            try:
                self._recvall(sock, self.paddingLength)
            except:
                raise EOFError

    def _sendall(sock, data):
        """
        Writes data to a socket and does not return until all the data is sent.
        """
        length = len(data)
        while length:
            try:
                sent = sock.send(data)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait until writable.
                    select.select([], [sock], [])
                    continue
                else:
                    raise
            data = data[sent:]
            length -= sent
    _sendall = staticmethod(_sendall)

    def write(self, sock):
        """Encode and write a Record to a socket."""
        # Pad the payload to an 8-byte boundary, as recommended by the spec.
        self.paddingLength = -self.contentLength & 7

        if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        header = struct.pack(FCGI_Header, self.version, self.type,
                             self.requestId, self.contentLength,
                             self.paddingLength)
        self._sendall(sock, header)
        if self.contentLength:
            self._sendall(sock, self.contentData)
        if self.paddingLength:
            self._sendall(sock, '\x00'*self.paddingLength)
class Request(object):
    """
    Represents a single FastCGI request.
    These objects are passed to your handler and is the main interface
    between your handler and the fcgi module. The methods should not
    be called by your handler. However, server, params, stdin, stdout,
    stderr, and data are free for your handler's use.
    """
    def __init__(self, conn, inputStreamClass):
        self._conn = conn

        self.server = conn.server
        self.params = {}                  # CGI-style environment, filled from FCGI_PARAMS
        self.stdin = inputStreamClass(conn)
        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
        self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
        self.data = inputStreamClass(conn)  # FCGI_DATA stream (filter role)

    def run(self):
        """Runs the handler, flushes the streams, and ends the request."""
        try:
            protocolStatus, appStatus = self.server.handler(self)
        except:
            # Dump the traceback to the FastCGI stderr stream and, if
            # nothing was sent yet, let the server emit an error page.
            traceback.print_exc(file=self.stderr)
            self.stderr.flush()
            if not self.stdout.dataWritten:
                self.server.error(self)

            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0

        if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
                             (protocolStatus, appStatus))

        self._flush()
        self._end(appStatus, protocolStatus)

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # Delegate to the Connection so it can send FCGI_END_REQUEST.
        self._conn.end_request(self, appStatus, protocolStatus)

    def _flush(self):
        self.stdout.close()
        self.stderr.close()
class CGIRequest(Request):
    """A normal CGI request disguised as a FastCGI request.

    Used when the process is launched in a plain CGI context: params come
    from os.environ and the streams are the real stdin/stdout/stderr.
    """
    def __init__(self, server):
        # These are normally filled in by Connection.
        self.requestId = 1
        self.role = FCGI_RESPONDER
        self.flags = 0
        self.aborted = False

        self.server = server
        self.params = dict(os.environ)
        self.stdin = sys.stdin
        self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
        self.stderr = sys.stderr
        self.data = StringIO.StringIO()

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # In CGI mode, ending the request means exiting the process.
        sys.exit(appStatus)

    def _flush(self):
        # Not buffered, do nothing.
        pass
class Connection(object):
    """
    A Connection with the web server.
    Each Connection is associated with a single socket (which is
    connected to the web server) and is responsible for handling all
    the FastCGI message processing for that socket.
    """
    _multiplexed = False
    _inputStreamClass = InputStream

    def __init__(self, sock, addr, server):
        self._sock = sock
        self._addr = addr
        self.server = server

        # Active Requests for this Connection, mapped by request ID.
        self._requests = {}

    def _cleanupSocket(self):
        """Close the Connection's socket."""
        try:
            self._sock.shutdown(socket.SHUT_WR)
        except:
            return
        try:
            # Drain any remaining input so the peer sees a clean shutdown.
            while True:
                r, w, e = select.select([self._sock], [], [])
                if not r or not self._sock.recv(1024):
                    break
        except:
            pass
        self._sock.close()

    def run(self):
        """Begin processing data from the socket."""
        self._keepGoing = True
        while self._keepGoing:
            try:
                self.process_input()
            except EOFError:
                break
            except (select.error, socket.error), e:
                if e[0] == errno.EBADF: # Socket was closed by Request.
                    break
                raise

        self._cleanupSocket()

    def process_input(self):
        """Attempt to read a single Record from the socket and process it."""
        # Currently, any children Request threads notify this Connection
        # that it is no longer needed by closing the Connection's socket.
        # We need to put a timeout on select, otherwise we might get
        # stuck in it indefinitely... (I don't like this solution.)
        while self._keepGoing:
            try:
                r, w, e = select.select([self._sock], [], [], 1.0)
            except ValueError:
                # Sigh. ValueError gets thrown sometimes when passing select
                # a closed socket.
                raise EOFError
            if r: break
        if not self._keepGoing:
            return
        rec = Record()
        rec.read(self._sock)

        # Dispatch by record type.
        if rec.type == FCGI_GET_VALUES:
            self._do_get_values(rec)
        elif rec.type == FCGI_BEGIN_REQUEST:
            self._do_begin_request(rec)
        elif rec.type == FCGI_ABORT_REQUEST:
            self._do_abort_request(rec)
        elif rec.type == FCGI_PARAMS:
            self._do_params(rec)
        elif rec.type == FCGI_STDIN:
            self._do_stdin(rec)
        elif rec.type == FCGI_DATA:
            self._do_data(rec)
        elif rec.requestId == FCGI_NULL_REQUEST_ID:
            self._do_unknown_type(rec)
        else:
            # Need to complain about this.
            pass

    def writeRecord(self, rec):
        """
        Write a Record to the socket.
        """
        rec.write(self._sock)

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        """
        End a Request.
        Called by Request objects. An FCGI_END_REQUEST Record is
        sent to the web server. If the web server no longer requires
        the connection, the socket is closed, thereby ending this
        Connection (run() returns).
        """
        rec = Record(FCGI_END_REQUEST, req.requestId)
        rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
                                      protocolStatus)
        rec.contentLength = FCGI_EndRequestBody_LEN
        self.writeRecord(rec)

        if remove:
            del self._requests[req.requestId]

        if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)

        # Close the connection unless the server asked to keep it open.
        if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
            self._cleanupSocket()
            self._keepGoing = False

    def _do_get_values(self, inrec):
        """Handle an FCGI_GET_VALUES request from the web server."""
        outrec = Record(FCGI_GET_VALUES_RESULT)

        pos = 0
        while pos < inrec.contentLength:
            pos, (name, value) = decode_pair(inrec.contentData, pos)
            cap = self.server.capability.get(name)
            if cap is not None:
                # Only echo back capabilities we actually advertise.
                outrec.contentData += encode_pair(name, str(cap))

        outrec.contentLength = len(outrec.contentData)
        self.writeRecord(outrec)

    def _do_begin_request(self, inrec):
        """Handle an FCGI_BEGIN_REQUEST from the web server."""
        role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)

        req = self.server.request_class(self, self._inputStreamClass)
        req.requestId, req.role, req.flags = inrec.requestId, role, flags
        req.aborted = False

        if not self._multiplexed and self._requests:
            # Can't multiplex requests.
            self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
        else:
            self._requests[inrec.requestId] = req

    def _do_abort_request(self, inrec):
        """
        Handle an FCGI_ABORT_REQUEST from the web server.
        We just mark a flag in the associated Request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.aborted = True

    def _start_request(self, req):
        """Run the request."""
        # Not multiplexed, so run it inline.
        req.run()

    def _do_params(self, inrec):
        """
        Handle an FCGI_PARAMS Record.
        If the last FCGI_PARAMS Record is received, start the request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            if inrec.contentLength:
                pos = 0
                while pos < inrec.contentLength:
                    pos, (name, value) = decode_pair(inrec.contentData, pos)
                    req.params[name] = value
            else:
                # An empty FCGI_PARAMS record signals end of parameters.
                self._start_request(req)

    def _do_stdin(self, inrec):
        """Handle the FCGI_STDIN stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.stdin.add_data(inrec.contentData)

    def _do_data(self, inrec):
        """Handle the FCGI_DATA stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.data.add_data(inrec.contentData)

    def _do_unknown_type(self, inrec):
        """Handle an unknown request type. Respond accordingly."""
        outrec = Record(FCGI_UNKNOWN_TYPE)
        outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
        outrec.contentLength = FCGI_UnknownTypeBody_LEN
        self.writeRecord(outrec)
class MultiplexedConnection(Connection):
    """
    A version of Connection capable of handling multiple requests
    simultaneously.

    Each request runs in its own thread; a condition variable guards
    both the request table and record writes.
    """
    _multiplexed = True
    _inputStreamClass = MultiplexedInputStream

    def __init__(self, sock, addr, server):
        super(MultiplexedConnection, self).__init__(sock, addr, server)

        # Used to arbitrate access to self._requests.
        lock = threading.RLock()

        # Notification is posted everytime a request completes, allowing us
        # to quit cleanly.
        self._lock = threading.Condition(lock)

    def _cleanupSocket(self):
        # Wait for any outstanding requests before closing the socket.
        self._lock.acquire()
        while self._requests:
            self._lock.wait()
        self._lock.release()

        super(MultiplexedConnection, self)._cleanupSocket()

    def writeRecord(self, rec):
        # Must use locking to prevent intermingling of Records from different
        # threads.
        self._lock.acquire()
        try:
            # Probably faster than calling super. ;)
            rec.write(self._sock)
        finally:
            self._lock.release()

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self).end_request(req, appStatus,
                                                           protocolStatus,
                                                           remove)
            # Wake _cleanupSocket(), which may be waiting for us to finish.
            self._lock.notify()
        finally:
            self._lock.release()

    def _do_begin_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_begin_request(inrec)
        finally:
            self._lock.release()

    def _do_abort_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_abort_request(inrec)
        finally:
            self._lock.release()

    def _start_request(self, req):
        # Multiplexed: each request gets its own thread.
        thread.start_new_thread(req.run, ())

    def _do_params(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_params(inrec)
        finally:
            self._lock.release()

    def _do_stdin(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_stdin(inrec)
        finally:
            self._lock.release()

    def _do_data(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_data(inrec)
        finally:
            self._lock.release()
class Server(object):
    """
    The FastCGI server.
    Waits for connections from the web server, processing each
    request.
    If run in a normal CGI context, it will instead instantiate a
    CGIRequest and run the handler through there.
    """
    request_class = Request
    cgirequest_class = CGIRequest

    # Limits the size of the InputStream's string buffer to this size + the
    # server's maximum Record size. Since the InputStream is not seekable,
    # we throw away already-read data once this certain amount has been read.
    inputStreamShrinkThreshold = 102400 - 8192

    def __init__(self, handler=None, maxwrite=8192, bindAddress=None,
                 umask=None, multiplexed=False):
        """
        handler, if present, must reference a function or method that
        takes one argument: a Request object. If handler is not
        specified at creation time, Server *must* be subclassed.
        (The handler method below is abstract.)

        maxwrite is the maximum number of bytes (per Record) to write
        to the server. I've noticed mod_fastcgi has a relatively small
        receive buffer (8K or so).

        bindAddress, if present, must either be a string or a 2-tuple. If
        present, run() will open its own listening socket. You would use
        this if you wanted to run your application as an 'external' FastCGI
        app. (i.e. the webserver would no longer be responsible for starting
        your app) If a string, it will be interpreted as a filename and a UNIX
        socket will be opened. If a tuple, the first element, a string,
        is the interface name/IP to bind to, and the second element (an int)
        is the port number.

        Set multiplexed to True if you want to handle multiple requests
        per connection. Some FastCGI backends (namely mod_fastcgi) don't
        multiplex requests at all, so by default this is off (which saves
        on thread creation/locking overhead). If threads aren't available,
        this keyword is ignored; it's not possible to multiplex requests
        at all.
        """
        if handler is not None:
            self.handler = handler
        self.maxwrite = maxwrite
        if thread_available:
            try:
                import resource
                # Attempt to glean the maximum number of connections
                # from the OS.
                maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
            except ImportError:
                maxConns = 100 # Just some made up number.
            maxReqs = maxConns
            if multiplexed:
                self._connectionClass = MultiplexedConnection
                maxReqs *= 5 # Another made up number.
            else:
                self._connectionClass = Connection
            self.capability = {
                FCGI_MAX_CONNS: maxConns,
                FCGI_MAX_REQS: maxReqs,
                FCGI_MPXS_CONNS: multiplexed and 1 or 0
                }
        else:
            self._connectionClass = Connection
            self.capability = {
                # If threads aren't available, these are pretty much correct.
                FCGI_MAX_CONNS: 1,
                FCGI_MAX_REQS: 1,
                FCGI_MPXS_CONNS: 0
                }
        self._bindAddress = bindAddress
        self._umask = umask

    def _setupSocket(self):
        # Either adopt the webserver-provided listen socket (fd 0), fall
        # through to plain-CGI handling, or open our own listening socket.
        if self._bindAddress is None: # Run as a normal FastCGI?
            isFCGI = True

            sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
                                 socket.SOCK_STREAM)
            try:
                sock.getpeername()
            except socket.error, e:
                if e[0] == errno.ENOTSOCK:
                    # Not a socket, assume CGI context.
                    isFCGI = False
                elif e[0] != errno.ENOTCONN:
                    raise

            # FastCGI/CGI discrimination is broken on Mac OS X.
            # Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
            # if you want to run your app as a simple CGI. (You can do
            # this with Apache's mod_env [not loaded by default in OS X
            # client, ha ha] and the SetEnv directive.)
            if not isFCGI or \
               os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
                req = self.cgirequest_class(self)
                req.run()
                sys.exit(0)
        else:
            # Run as a server
            oldUmask = None
            if type(self._bindAddress) is str:
                # Unix socket
                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                try:
                    os.unlink(self._bindAddress)
                except OSError:
                    pass
                if self._umask is not None:
                    oldUmask = os.umask(self._umask)
            else:
                # INET socket
                assert type(self._bindAddress) is tuple
                assert len(self._bindAddress) == 2
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            sock.bind(self._bindAddress)
            sock.listen(socket.SOMAXCONN)

            if oldUmask is not None:
                os.umask(oldUmask)

        return sock

    def _cleanupSocket(self, sock):
        """Closes the main socket."""
        sock.close()

    def _installSignalHandlers(self):
        # Remember the old handlers so they can be restored on shutdown.
        self._oldSIGs = [(x,signal.getsignal(x)) for x in
                         (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)]
        signal.signal(signal.SIGHUP, self._hupHandler)
        signal.signal(signal.SIGINT, self._intHandler)
        signal.signal(signal.SIGTERM, self._intHandler)

    def _restoreSignalHandlers(self):
        for signum,handler in self._oldSIGs:
            signal.signal(signum, handler)

    def _hupHandler(self, signum, frame):
        # SIGHUP: stop, but report that a reload was requested.
        self._hupReceived = True
        self._keepGoing = False

    def _intHandler(self, signum, frame):
        # SIGINT/SIGTERM: plain stop.
        self._keepGoing = False

    def run(self, timeout=1.0):
        """
        The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
        SIGHUP was received, False otherwise.
        """
        # Optional allow-list of webserver addresses (comma-separated).
        web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
        if web_server_addrs is not None:
            web_server_addrs = map(lambda x: x.strip(),
                                   web_server_addrs.split(','))

        sock = self._setupSocket()

        self._keepGoing = True
        self._hupReceived = False

        # Install signal handlers.
        self._installSignalHandlers()

        while self._keepGoing:
            try:
                r, w, e = select.select([sock], [], [], timeout)
            except select.error, e:
                if e[0] == errno.EINTR:
                    continue
                raise

            if r:
                try:
                    clientSock, addr = sock.accept()
                except socket.error, e:
                    if e[0] in (errno.EINTR, errno.EAGAIN):
                        continue
                    raise

                if web_server_addrs and \
                   (len(addr) != 2 or addr[0] not in web_server_addrs):
                    # Peer not on the allow-list; drop the connection.
                    clientSock.close()
                    continue

                # Instantiate a new Connection and begin processing FastCGI
                # messages (either in a new thread or this thread).
                conn = self._connectionClass(clientSock, addr, self)
                thread.start_new_thread(conn.run, ())

            self._mainloopPeriodic()

        # Restore signal handlers.
        self._restoreSignalHandlers()

        self._cleanupSocket(sock)

        return self._hupReceived

    def _mainloopPeriodic(self):
        """
        Called with just about each iteration of the main loop. Meant to
        be overridden.
        """
        pass

    def _exit(self, reload=False):
        """
        Protected convenience method for subclasses to force an exit. Not
        really thread-safe, which is why it isn't public.
        """
        if self._keepGoing:
            self._keepGoing = False
            self._hupReceived = reload

    def handler(self, req):
        """
        Default handler, which just raises an exception. Unless a handler
        is passed at initialization time, this must be implemented by
        a subclass.
        """
        raise NotImplementedError, self.__class__.__name__ + '.handler'

    def error(self, req):
        """
        Called by Request if an exception occurs within the handler. May and
        should be overridden.
        """
        import cgitb
        req.stdout.write('Content-Type: text/html\r\n\r\n' +
                         cgitb.html(sys.exc_info()))
class WSGIServer(Server):
    """
    FastCGI server that supports the Web Server Gateway Interface. See
    <http://www.python.org/peps/pep-0333.html>.
    """
    def __init__(self, application, environ=None,
                 multithreaded=True, **kw):
        """
        environ, if present, must be a dictionary-like object. Its
        contents will be copied into application's environ. Useful
        for passing application-specific variables.

        Set multithreaded to False if your application is not MT-safe.
        """
        if kw.has_key('handler'):
            del kw['handler'] # Doesn't make sense to let this through
        super(WSGIServer, self).__init__(**kw)

        if environ is None:
            environ = {}

        self.application = application
        self.environ = environ
        self.multithreaded = multithreaded

        # Used to force single-threadedness
        self._app_lock = thread.allocate_lock()

    def handler(self, req):
        """Special handler for WSGI.

        Builds a PEP 333 environ from the request params, runs the
        application, and streams its response back via req.stdout.
        """
        if req.role != FCGI_RESPONDER:
            return FCGI_UNKNOWN_ROLE, 0

        # Mostly taken from example CGI gateway.
        environ = req.params
        environ.update(self.environ)

        environ['wsgi.version'] = (1,0)
        environ['wsgi.input'] = req.stdin
        if self._bindAddress is None:
            stderr = req.stderr
        else:
            # External server: also mirror errors to the process's stderr.
            stderr = TeeOutputStream((sys.stderr, req.stderr))
        environ['wsgi.errors'] = stderr
        environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
                                      thread_available and self.multithreaded
        # Rationale for the following: If started by the web server
        # (self._bindAddress is None) in either FastCGI or CGI mode, the
        # possibility of being spawned multiple times simultaneously is quite
        # real. And, if started as an external server, multiple copies may be
        # spawned for load-balancing/redundancy. (Though I don't think
        # mod_fastcgi supports this?)
        environ['wsgi.multiprocess'] = True
        environ['wsgi.run_once'] = isinstance(req, CGIRequest)

        if environ.get('HTTPS', 'off') in ('on', '1'):
            environ['wsgi.url_scheme'] = 'https'
        else:
            environ['wsgi.url_scheme'] = 'http'

        self._sanitizeEnv(environ)

        headers_set = []
        headers_sent = []
        result = None

        def write(data):
            # Send headers (lazily, on first write) followed by the body data.
            assert type(data) is str, 'write() argument must be string'
            assert headers_set, 'write() before start_response()'

            if not headers_sent:
                status, responseHeaders = headers_sent[:] = headers_set
                found = False
                for header,value in responseHeaders:
                    if header.lower() == 'content-length':
                        found = True
                        break
                if not found and result is not None:
                    try:
                        # Single-chunk response: we can infer Content-Length.
                        if len(result) == 1:
                            responseHeaders.append(('Content-Length',
                                                    str(len(data))))
                    except:
                        pass
                s = 'Status: %s\r\n' % status
                for header in responseHeaders:
                    s += '%s: %s\r\n' % header
                s += '\r\n'
                req.stdout.write(s)

            req.stdout.write(data)
            req.stdout.flush()

        def start_response(status, response_headers, exc_info=None):
            # PEP 333 start_response callable.
            if exc_info:
                try:
                    if headers_sent:
                        # Re-raise if too late
                        raise exc_info[0], exc_info[1], exc_info[2]
                finally:
                    exc_info = None # avoid dangling circular ref
            else:
                assert not headers_set, 'Headers already set!'

            assert type(status) is str, 'Status must be a string'
            assert len(status) >= 4, 'Status must be at least 4 characters'
            assert int(status[:3]), 'Status must begin with 3-digit code'
            assert status[3] == ' ', 'Status must have a space after code'
            assert type(response_headers) is list, 'Headers must be a list'
            if __debug__:
                for name,val in response_headers:
                    assert type(name) is str, 'Header names must be strings'
                    assert type(val) is str, 'Header values must be strings'

            headers_set[:] = [status, response_headers]
            return write

        if not self.multithreaded:
            self._app_lock.acquire()
        try:
            try:
                result = self.application(environ, start_response)
                try:
                    for data in result:
                        if data:
                            write(data)
                    if not headers_sent:
                        write('') # in case body was empty
                finally:
                    if hasattr(result, 'close'):
                        result.close()
            except socket.error, e:
                if e[0] != errno.EPIPE:
                    raise # Don't let EPIPE propagate beyond server
        finally:
            if not self.multithreaded:
                self._app_lock.release()

        return FCGI_REQUEST_COMPLETE, 0

    def _sanitizeEnv(self, environ):
        """Ensure certain values are present, if required by WSGI."""
        if not environ.has_key('SCRIPT_NAME'):
            environ['SCRIPT_NAME'] = ''
        if not environ.has_key('PATH_INFO'):
            environ['PATH_INFO'] = ''

        # If any of these are missing, it probably signifies a broken
        # server...
        for name,default in [('REQUEST_METHOD', 'GET'),
                             ('SERVER_NAME', 'localhost'),
                             ('SERVER_PORT', '80'),
                             ('SERVER_PROTOCOL', 'HTTP/1.0')]:
            if not environ.has_key(name):
                environ['wsgi.errors'].write('%s: missing FastCGI param %s '
                                             'required by WSGI!\n' %
                                             (self.__class__.__name__, name))
                environ[name] = default
if __name__ == '__main__':
    # Smoke-test application: dumps the WSGI environ and any form data
    # as an HTML table.  Python 2 only (backtick repr syntax below).
    def test_app(environ, start_response):
        """Probably not the most efficient example."""
        import cgi
        start_response('200 OK', [('Content-Type', 'text/html')])
        yield '<html><head><title>Hello World!</title></head>\n' \
              '<body>\n' \
              '<p>Hello World!</p>\n' \
              '<table border="1">'
        names = environ.keys()
        names.sort()
        for name in names:
            # `x` is Python 2 shorthand for repr(x).
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                name, cgi.escape(`environ[name]`))

        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
                                keep_blank_values=1)
        if form.list:
            yield '<tr><th colspan="2">Form data</th></tr>'

        for field in form.list:
            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
                field.name, field.value)

        yield '</table>\n' \
              '</body></html>\n'

    # Serve the demo app until the web server closes the connection.
    WSGIServer(test_app).run()
| |
""".. Ignore pydocstyle D400.
=======
Resolwe
=======
.. autoclass:: resdk.Resolwe
:members:
"""
import getpass
import logging
import ntpath
import os
import re
import uuid
from urllib.parse import urljoin
import requests
import slumber
from .constants import CHUNK_SIZE
from .exceptions import ValidationError, handle_http_exception
from .query import ResolweQuery
from .resources import (
Collection,
Data,
DescriptorSchema,
Geneset,
Group,
Process,
Relation,
Sample,
User,
)
from .resources.base import BaseResource
from .resources.kb import Feature, Mapping
from .resources.utils import get_collection_id, get_data_id, is_data, iterate_fields
from .utils import is_email
DEFAULT_URL = "http://localhost:8000"
class ResolweResource(slumber.Resource):
    """Wrapper around slumber's Resource with custom exceptions handler."""

    def __getattribute__(self, item):
        """Return class attribute and wrap request methods in exception handler.

        HTTP-verb methods are wrapped with ``handle_http_exception`` so
        server errors surface as resdk exceptions rather than raw
        slumber/requests ones.
        """
        attribute = super().__getattribute__(item)
        verb_methods = ("get", "options", "head", "post", "patch", "put", "delete")
        if item not in verb_methods:
            return attribute
        return handle_http_exception(attribute)
class ResolweAPI(slumber.API):
    """Use custom ResolweResource resource class in slumber's API."""

    # Tell slumber to build endpoint objects from ResolweResource so every
    # HTTP-verb call goes through its exception-translating wrapper.
    resource_class = ResolweResource
class Resolwe:
    """Connect to a Resolwe server.

    :param username: user's username
    :type username: str
    :param password: user's password
    :type password: str
    :param url: Resolwe server instance
    :type url: str
    """

    # Map resource class to ResolweQuery name
    resource_query_mapping = {
        Data: "data",
        Collection: "collection",
        Sample: "sample",
        Relation: "relation",
        Process: "process",
        DescriptorSchema: "descriptor_schema",
        User: "user",
        Group: "group",
        Feature: "feature",
        Mapping: "mapping",
        Geneset: "geneset",
    }
    # Map ResolweQuery name to it's slug_field
    slug_field_mapping = {
        "user": "username",
        "group": "name",
    }
    # Map ResolweQuery name to it's default query filter
    query_filter_mapping = {
        "geneset": {"type": "data:geneset"},
    }

    # Per-resource ResolweQuery attributes; filled in by _initialize_queries().
    data = None
    collection = None
    sample = None
    relation = None
    process = None
    descriptor_schema = None
    user = None
    group = None
    feature = None
    mapping = None
    geneset = None

    session = None

    def __init__(self, username=None, password=None, url=None):
        """Initialize attributes."""
        self.session = requests.Session()
        if url is None:
            # Try to get URL from environmental variable, otherwise fallback to default.
            url = os.environ.get("RESOLWE_HOST_URL", DEFAULT_URL)
        self._validate_url(url)

        # Credentials may also come from the environment.
        if username is None:
            username = os.environ.get("RESOLWE_API_USERNAME", None)
        if password is None:
            password = os.environ.get("RESOLWE_API_PASSWORD", None)

        self.url = url
        self._login(username=username, password=password)
        self.logger = logging.getLogger(__name__)

    def _validate_url(self, url):
        """Check URL scheme and that the server is reachable."""
        if not re.match(r"https?://", url):
            raise ValueError("Server url must start with http(s)://")

        try:
            self.session.get(urljoin(url, "/api/"))
        except requests.exceptions.ConnectionError:
            raise ValueError("The site can't be reached: {}".format(url))

    def _initialize_queries(self):
        """Initialize ResolweQuery's."""
        for resource, query_name in self.resource_query_mapping.items():
            slug_field = self.slug_field_mapping.get(query_name, "slug")
            query = ResolweQuery(self, resource, slug_field=slug_field)
            if query_name in self.query_filter_mapping:
                query = query.filter(**self.query_filter_mapping[query_name])
            setattr(self, query_name, query)

    def _login(self, username=None, password=None):
        """(Re)create the authenticated API client and the resource queries."""
        self.auth = ResAuth(username, password, self.url)
        self.session.cookies = requests.utils.cookiejar_from_dict(self.auth.cookies)
        self.api = ResolweAPI(
            urljoin(self.url, "/api/"),
            self.auth,
            session=self.session,
            append_slash=False,
        )
        self._initialize_queries()

    def login(self, username=None, password=None):
        """Interactive login.

        Ask the user to enter credentials in command prompt. If
        username / email and password are given, login without prompt.
        """
        if username is None:
            username = input("Username (or email): ")
        if password is None:
            password = getpass.getpass("Password: ")
        self._login(username=username, password=password)

    def get_query_by_resource(self, resource):
        """Get ResolweQuery for a given resource."""
        if isinstance(resource, BaseResource):
            resource = resource.__class__
        elif not issubclass(resource, BaseResource):
            raise ValueError(
                "Provide a Resource class or it's instance as a resource argument."
            )
        return getattr(self, self.resource_query_mapping.get(resource))

    def __repr__(self):
        """Return string representation of the current object."""
        if self.auth.username:
            return "Resolwe <url: {}, username: {}>".format(
                self.url, self.auth.username
            )
        return "Resolwe <url: {}>".format(self.url)

    def _process_file_field(self, path):
        """Process file field and return it in resolwe-specific format.

        Upload referenced file if it is stored locally and return
        original filename and it's temporary location.

        :param path: path to file (local or url)
        :type path: str/path

        :rtype: dict
        """
        # Already in resolwe format -> pass through untouched.
        if isinstance(path, dict) and "file" in path and "file_temp" in path:
            return path

        url_regex = (
            r"^(https?|ftp)://[-A-Za-z0-9\+&@#/%?=~_|!:,.;]*[-A-Za-z0-9\+&@#/%=~_|]$"
        )
        if re.match(url_regex, path):
            # Remote file: strip query/fragment to recover the bare name.
            file_name = path.split("/")[-1].split("#")[0].split("?")[0]
            return {"file": file_name, "file_temp": path}

        if not os.path.isfile(path):
            raise ValueError("File {} not found.".format(path))

        file_temp = self._upload_file(path)
        if not file_temp:
            raise Exception("Upload failed for {}.".format(path))

        file_name = ntpath.basename(path)
        return {
            "file": file_name,
            "file_temp": file_temp,
        }

    def _get_process(self, slug=None):
        """Return process with given slug.

        Raise error if process doesn't exist or more than one is returned.
        """
        return self.process.get(slug=slug)

    def _process_inputs(self, inputs, process):
        """Process input fields.

        Processing includes:
        * wrapping ``list:*`` to the list if they are not already
        * dehydrating values of ``data:*`` and ``list:data:*`` fields
        * uploading files in ``basic:file:`` and ``list:basic:file:``
          fields
        """

        def deep_copy(current):
            """Copy inputs, replacing Data objects with their ids."""
            if isinstance(current, dict):
                return {key: deep_copy(val) for key, val in current.items()}
            elif isinstance(current, list):
                return [deep_copy(val) for val in current]
            elif is_data(current):
                return current.id
            else:
                return current

        # leave original intact
        inputs = deep_copy(inputs)

        try:
            for schema, fields in iterate_fields(inputs, process.input_schema):
                field_name = schema["name"]
                field_type = schema["type"]
                field_value = fields[field_name]

                # XXX: Remove this when supported on server.
                # Wrap `list:` fields into list if they are not already
                if field_type.startswith("list:") and not isinstance(field_value, list):
                    fields[field_name] = [field_value]
                    field_value = fields[
                        field_name
                    ]  # update value for the rest of the loop

                # Dehydrate `data:*` fields
                if field_type.startswith("data:"):
                    fields[field_name] = get_data_id(field_value)

                # Dehydrate `list:data:*` fields
                elif field_type.startswith("list:data:"):
                    fields[field_name] = [get_data_id(data) for data in field_value]

                # Upload files in `basic:file:` fields
                elif field_type == "basic:file:":
                    fields[field_name] = self._process_file_field(field_value)

                # Upload files in list:basic:file:` fields
                elif field_type == "list:basic:file:":
                    fields[field_name] = [
                        self._process_file_field(obj) for obj in field_value
                    ]

        except KeyError as key_error:
            field_name = key_error.args[0]
            slug = process.slug
            raise ValidationError(
                "Field '{}' not in process '{}' input schema.".format(field_name, slug)
            )

        return inputs

    def run(
        self,
        slug=None,
        input=None,
        descriptor=None,
        descriptor_schema=None,
        collection=None,
        data_name="",
        process_resources=None,
    ):
        """Run process and return the corresponding Data object.

        1. Upload files referenced in inputs
        2. Create Data object with given inputs
        3. Command is run that processes inputs into outputs
        4. Return Data object

        The processing runs asynchronously, so the returned Data
        object does not have an OK status or outputs when returned.
        Use data.update() to refresh the Data resource object.

        :param str slug: Process slug (human readable unique identifier)
        :param dict input: Input values (defaults to empty inputs)
        :param dict descriptor: Descriptor values
        :param str descriptor_schema: A valid descriptor schema slug
        :param int/resource collection: Collection resource or it's id
            into which data object should be included
        :param str data_name: Default name of data object
        :param dict process_resources: Process resources

        :return: data object that was just created
        :rtype: Data object
        """
        # ``input`` defaults to None rather than ``{}`` to avoid the shared
        # mutable-default-argument pitfall; passing {} behaves identically.
        if input is None:
            input = {}

        if (descriptor and not descriptor_schema) or (
            not descriptor and descriptor_schema
        ):
            raise ValueError("Set both or neither descriptor and descriptor_schema.")

        process = self._get_process(slug)
        data = {
            "process": {"slug": process.slug},
            "input": self._process_inputs(input, process),
        }

        if descriptor and descriptor_schema:
            data["descriptor"] = descriptor
            data["descriptor_schema"] = {"slug": descriptor_schema}

        if collection:
            data["collection"] = {"id": get_collection_id(collection)}

        if data_name:
            data["name"] = data_name

        if process_resources is not None:
            if not isinstance(process_resources, dict):
                raise ValueError("Argument process_resources must be a dictionary.")
            if set(process_resources.keys()) - set(["cores", "memory", "storage"]):
                raise ValueError(
                    "Argument process_resources can only have cores, memory or storage keys."
                )
            data["process_resources"] = process_resources

        model_data = self.api.data.post(data)
        return Data(resolwe=self, **model_data)

    def get_or_run(self, slug=None, input=None):
        """Return existing object if found, otherwise create new one.

        :param str slug: Process slug (human readable unique identifier)
        :param dict input: Input values (defaults to empty inputs)
        """
        # Same mutable-default fix as in run().
        if input is None:
            input = {}

        process = self._get_process(slug)
        inputs = self._process_inputs(input, process)

        data = {
            "process": process.slug,
            "input": inputs,
        }

        model_data = self.api.data.get_or_create.post(data)
        return Data(resolwe=self, **model_data)

    def _upload_file(self, file_path):
        """Upload a single file to the server.

        File is uploaded in chunks of size CHUNK_SIZE bytes.

        :param str file_path: File path

        :return: server-side temporary file name, or None if a chunk
            failed five times in a row
        """
        response = None
        chunk_number = 0
        session_id = str(uuid.uuid4())
        file_uid = str(uuid.uuid4())
        file_size = os.path.getsize(file_path)
        base_name = os.path.basename(file_path)

        with open(file_path, "rb") as file_:
            while True:
                chunk = file_.read(CHUNK_SIZE)
                if not chunk:
                    break

                # Retry each chunk up to 5 times.
                for i in range(5):
                    if i > 0 and response is not None:
                        self.logger.warning(
                            "Chunk upload failed (error %s): repeating for chunk number %s",
                            response.status_code,
                            chunk_number,
                        )

                    response = self.session.post(
                        urljoin(self.url, "upload/"),
                        auth=self.auth,
                        # request are smart and make
                        # 'CONTENT_TYPE': 'multipart/form-data;''
                        files={"file": (base_name, chunk)},
                        # stuff in data will be in response.POST on server
                        data={
                            "_chunkSize": CHUNK_SIZE,
                            "_totalSize": file_size,
                            "_chunkNumber": chunk_number,
                            "_currentChunkSize": len(chunk),
                        },
                        headers={"Session-Id": session_id, "X-File-Uid": file_uid},
                    )

                    if response.status_code in [200, 201]:
                        break
                else:
                    # Upload of a chunk failed (5 retries)
                    return None

                progress = 100.0 * (chunk_number * CHUNK_SIZE + len(chunk)) / file_size
                message = "{:.0f} % Uploaded {}".format(progress, file_path)
                self.logger.info(message)
                chunk_number += 1

        return response.json()["files"][0]["temp"]

    def _download_files(self, files, download_dir=None):
        """Download files.

        Download files from the Resolwe server to the download
        directory (defaults to the current working directory).

        :param files: files to download
        :type files: list of file URI
        :param download_dir: download directory
        :type download_dir: string
        :rtype: None
        """
        if not download_dir:
            download_dir = os.getcwd()

        if not os.path.isdir(download_dir):
            raise ValueError(
                "Download directory does not exist: {}".format(download_dir)
            )

        if not files:
            self.logger.info("No files to download.")
        else:
            self.logger.info("Downloading files to %s:", download_dir)

            for file_uri in files:
                file_name = os.path.basename(file_uri)
                file_path = os.path.dirname(file_uri)
                file_url = urljoin(self.url, "data/{}".format(file_uri))

                # Remove data id from path
                file_path = file_path.split("/", 1)[1] if "/" in file_path else ""
                full_path = os.path.join(download_dir, file_path)
                if not os.path.isdir(full_path):
                    os.makedirs(full_path)

                self.logger.info("* %s", os.path.join(file_path, file_name))

                with open(
                    os.path.join(download_dir, file_path, file_name), "wb"
                ) as file_handle:
                    response = self.session.get(file_url, stream=True, auth=self.auth)

                    if not response.ok:
                        response.raise_for_status()
                    else:
                        # Stream to disk chunk by chunk.
                        for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
                            file_handle.write(chunk)

    def data_usage(self, **query_params):
        """Get per-user data usage information.

        Display number of samples, data objects and sum of data object
        sizes for currently logged-in user. For admin users, display
        data for **all** users.
        """
        return self.api.base.data_usage.get(**query_params)
class ResAuth(requests.auth.AuthBase):
    """HTTP Resolwe Authentication for Request object.

    :param str username: user's username
    :param str password: user's password
    :param str url: Resolwe server address
    """

    #: Session ID used in HTTP requests
    sessionid = None
    #: CSRF token used in HTTP requests
    csrftoken = None

    def __init__(self, username=None, password=None, url=DEFAULT_URL):
        """Authenticate user on Resolwe server."""
        self.logger = logging.getLogger(__name__)
        self.cookies = {}
        self.username = username
        self.url = url

        # Anonymous access: no credentials, nothing to negotiate.
        if not username and not password:
            return

        # The login endpoint accepts either an email or a username.
        login_key = "email" if is_email(username) else "username"
        credentials = {login_key: username, "password": password}
        try:
            response = requests.post(
                urljoin(url, "/rest-auth/login/"), data=credentials
            )
        except requests.exceptions.ConnectionError:
            raise ValueError("Server not accessible on {}. Wrong url?".format(url))

        if response.status_code in [400, 403]:
            msg = "Response HTTP status code {}. Invalid credentials?".format(
                response.status_code
            )
            raise ValueError(msg)

        server_cookies = response.cookies
        if not ("sessionid" in server_cookies and "csrftoken" in server_cookies):
            raise Exception("Missing sessionid or csrftoken. Invalid credentials?")

        self.sessionid = server_cookies["sessionid"]
        self.csrftoken = server_cookies["csrftoken"]
        self.url = url
        self.cookies = {"csrftoken": self.csrftoken, "sessionid": self.sessionid}

    def __call__(self, request):
        """Set request headers."""
        if self.sessionid and self.csrftoken:
            request.headers["X-CSRFToken"] = self.csrftoken

        request.headers["referer"] = self.url

        # Not needed until we support HTTP Push with the API
        # if r.path_url != '/upload/':
        #     r.headers['X-SubscribeID'] = self.subscribe_id
        return request
| |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # Run *cmd* in a shell, echoing every output line to stdout as it
    # arrives, and return (exit_code, list_of_output_lines).
    # Python 2 script (print-statement syntax).
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)

    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # Stop once the process has exited and its output is drained.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)

    return (cmd_return_code, output)
def updateCMD(cmd=None):
    """Rewrite pkgcmd invocations to run as the target user with XW_ENV set.

    Non-pkgcmd commands are returned unchanged.
    """
    if "pkgcmd" not in cmd:
        return cmd
    return "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
def getUSERID():
    """Return doCMD's (exit_code, output) for an `id -u` lookup of PARAMETERS.user."""
    if PARAMETERS.mode == "SDB":
        lookup = "sdb -s %s shell id -u %s" % (PARAMETERS.device, PARAMETERS.user)
    else:
        lookup = 'ssh %s "id -u %s"' % (PARAMETERS.device, PARAMETERS.user)
    return doCMD(lookup)
def getPKGID(pkg_name=None):
    # Look up the package id of *pkg_name* by parsing `pkgcmd -l` output
    # from the device; returns the id string, or None when the listing
    # fails or the package is not installed.  Python 2 (print statements).
    if PARAMETERS.mode == "SDB":
        cmd = "sdb -s %s shell %s" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))
    else:
        cmd = "ssh %s \"%s\"" % (
            PARAMETERS.device, updateCMD('pkgcmd -l'))

    (return_code, output) = doCMD(cmd)
    if return_code != 0:
        return None

    test_pkg_id = None
    for line in output:
        pkg_infos = line.split()
        # 4-column lines are not package entries (presumably header/label
        # rows of pkgcmd output) -- skip them.  TODO confirm format.
        if len(pkg_infos) == 4:
            continue
        # Column 5 holds '[name]', column 3 holds '[pkgid]'.
        name = pkg_infos[5]
        name = name.lstrip('[').rstrip(']')
        print "name is: %s" % name
        if pkg_name == name:
            test_pkg_id = pkg_infos[3]
            test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
            print test_pkg_id
            break
    return test_pkg_id
def doRemoteCMD(cmd=None):
    """Run *cmd* on the target device (sdb shell or ssh) and return doCMD's result."""
    prepared = updateCMD(cmd)
    if PARAMETERS.mode == "SDB":
        remote = "sdb -s %s shell %s" % (PARAMETERS.device, prepared)
    else:
        remote = 'ssh %s "%s"' % (PARAMETERS.device, prepared)
    return doCMD(remote)
def doRemoteCopy(src=None, dest=None):
    """Copy local *src* to *dest* on the target device.

    Returns True on success and False on failure -- which is what the
    callers expect (instPKGs treats a falsy return as a failed copy).
    The previous version returned True exactly when the copy command
    exited non-zero, i.e. the result was inverted.
    """
    if PARAMETERS.mode == "SDB":
        cmd_prefix = "sdb -s %s push" % PARAMETERS.device
        cmd = "%s %s %s" % (cmd_prefix, src, dest)
    else:
        cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
    (return_code, output) = doCMD(cmd)
    # Flush the device filesystem before the result is acted upon.
    doRemoteCMD("sync")
    # Exit code 0 means the push/scp succeeded.
    return return_code == 0
def uninstPKGs():
    """Uninstall the test widgets and remove the deployed content directory.

    Returns True only when every step succeeded.
    """
    action_status = True
    for pkg_name in ["tct-alarm-tizen-tests", "alarmTestApp"]:
        pkg_id = getPKGID(pkg_name)
        if not pkg_id:
            # Package not found on device: record failure, keep going.
            action_status = False
            continue
        (return_code, output) = doRemoteCMD("pkgcmd -u -t wgt -q -n %s" % pkg_id)
        if any("Failure" in line for line in output):
            action_status = False

    # Clean up the pushed test content regardless of uninstall results.
    (return_code, output) = doRemoteCMD("rm -rf %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    return action_status
def instPKGs():
    """Push every .wgt under SCRIPT_DIR to the device and install it.

    Returns True only when directory setup, every copy, and every
    install step succeeded.
    """
    action_status = True
    (return_code, output) = doRemoteCMD("mkdir -p %s" % PKG_SRC_DIR)
    if return_code != 0:
        action_status = False

    for root, dirs, files in os.walk(SCRIPT_DIR):
        for entry in files:
            if not entry.endswith(".wgt"):
                continue
            remote_file = "%s/%s" % (SRC_DIR, entry)
            if not doRemoteCopy(os.path.join(root, entry), remote_file):
                action_status = False
            (return_code, output) = doRemoteCMD(
                "pkgcmd -i -t wgt -q -p %s" % remote_file)
            # The widget is no longer needed on the device once installed.
            doRemoteCMD("rm -rf %s" % remote_file)
            if any("Failure" in line for line in output):
                action_status = False

    return action_status
def main():
    # Parse options, locate the target device, resolve the app user's uid
    # (needed to build XW_ENV), then install or uninstall the packages.
    # Python 2 script (print statements, `except E, e` syntax).
    try:
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-m", dest="mode", action="store", help="Specify mode")
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-a", dest="user", action="store", help="User name")
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)

    # Defaults: run as the 'app' user over SDB.
    if not PARAMETERS.user:
        PARAMETERS.user = "app"
    if not PARAMETERS.mode:
        PARAMETERS.mode = "SDB"

    if PARAMETERS.mode == "SDB":
        # No device given: pick the first one `sdb devices` reports.
        if not PARAMETERS.device:
            (return_code, output) = doCMD("sdb devices")
            for line in output:
                if str.find(line, "\tdevice") != -1:
                    PARAMETERS.device = line.split("\t")[0]
                    break
    else:
        PARAMETERS.mode = "SSH"

    if not PARAMETERS.device:
        print "No device provided"
        sys.exit(1)

    # Resolve the uid so XW_ENV points at the user's D-Bus socket.
    user_info = getUSERID()
    re_code = user_info[0]
    if re_code == 0 :
        global XW_ENV
        userid = user_info[1][0]
        XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
    else:
        print "[Error] cmd commands error : %s"%str(user_info[1])
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)

    if PARAMETERS.buninstpkg:
        if not uninstPKGs():
            sys.exit(1)
    else:
        if not instPKGs():
            sys.exit(1)
if __name__ == "__main__":
    # Script entry point; main() exits non-zero itself on failure.
    main()
    sys.exit(0)
| |
# Copyright (c) 2016 EMC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Driver for EMC CoprHD FC volumes."""
import re
from oslo_log import log as logging
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.coprhd import common as coprhd_common
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
@interface.volumedriver
class EMCCoprHDFCDriver(driver.FibreChannelDriver):
    """CoprHD FC Driver.

    Thin Fibre Channel front end that delegates volume, snapshot and
    consistency-group operations to EMCCoprHDDriverCommon and adds the
    FC-specific connection/zoning plumbing.
    """

    VERSION = "3.0.0.0"

    # ThirdPartySystems wiki page
    CI_WIKI_NAME = "EMC_CoprHD_CI"

    def __init__(self, *args, **kwargs):
        super(EMCCoprHDFCDriver, self).__init__(*args, **kwargs)
        self.common = self._get_common_driver()

    def _get_common_driver(self):
        # Protocol-agnostic shared implementation, configured for FC.
        return coprhd_common.EMCCoprHDDriverCommon(
            protocol='FC',
            default_backend_name=self.__class__.__name__,
            configuration=self.configuration)

    def check_for_setup_error(self):
        self.common.check_for_setup_error()

    def create_volume(self, volume):
        """Creates a Volume."""
        self.common.create_volume(volume, self)
        self.common.set_volume_tags(volume, ['_obj_volume_type'])

    def create_cloned_volume(self, volume, src_vref):
        """Creates a cloned Volume."""
        self.common.create_cloned_volume(volume, src_vref)
        self.common.set_volume_tags(volume, ['_obj_volume_type'])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""
        self.common.create_volume_from_snapshot(snapshot, volume)
        self.common.set_volume_tags(volume, ['_obj_volume_type'])

    def extend_volume(self, volume, new_size):
        """Expands the size of the volume."""
        self.common.expand_volume(volume, new_size)

    def delete_volume(self, volume):
        """Deletes a volume."""
        self.common.delete_volume(volume)

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        self.common.create_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        self.common.delete_snapshot(snapshot)

    def ensure_export(self, context, volume):
        """Driver entry point to get the export info for an existing volume."""
        pass

    def create_export(self, context, volume, connector=None):
        """Driver entry point to get the export info for a new volume."""
        pass

    def remove_export(self, context, volume):
        """Driver entry point to remove an export for a volume."""
        pass

    def create_consistencygroup(self, context, group):
        """Creates a consistencygroup."""
        return self.common.create_consistencygroup(context, group)

    def update_consistencygroup(self, context, group, add_volumes=None,
                                remove_volumes=None):
        """Updates volumes in consistency group."""
        return self.common.update_consistencygroup(group, add_volumes,
                                                   remove_volumes)

    def delete_consistencygroup(self, context, group, volumes):
        """Deletes a consistency group."""
        return self.common.delete_consistencygroup(context, group, volumes)

    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Creates a cgsnapshot."""
        return self.common.create_cgsnapshot(cgsnapshot, snapshots)

    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a cgsnapshot."""
        return self.common.delete_cgsnapshot(cgsnapshot, snapshots)

    def check_for_export(self, context, volume_id):
        """Make sure volume is exported."""
        pass

    @fczm_utils.add_fc_zone
    def initialize_connection(self, volume, connector):
        """Initializes the connection and returns connection info."""
        properties = {}
        properties['volume_id'] = volume['id']
        properties['target_discovered'] = False
        properties['target_wwn'] = []

        init_ports = self._build_initport_list(connector)
        itls = self.common.initialize_connection(volume,
                                                 'FC',
                                                 init_ports,
                                                 connector['host'])

        target_wwns = None
        initiator_target_map = None

        if itls:
            # First initiator-target-lun triple carries the LUN number.
            properties['target_lun'] = itls[0]['hlu']
            target_wwns, initiator_target_map = (
                self._build_initiator_target_map(itls, connector))

        properties['target_wwn'] = target_wwns
        properties['initiator_target_map'] = initiator_target_map

        # Pass CHAP-style credentials through when the volume carries them.
        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        LOG.debug('FC properties: %s', properties)
        return {
            'driver_volume_type': 'fibre_channel',
            'data': properties,
        }

    @fczm_utils.remove_fc_zone
    def terminate_connection(self, volume, connector, **kwargs):
        """Driver entry point to detach a volume from an instance."""
        init_ports = self._build_initport_list(connector)
        itls = self.common.terminate_connection(volume,
                                                'FC',
                                                init_ports,
                                                connector['host'])

        volumes_count = self.common.get_exports_count_by_initiators(init_ports)
        if volumes_count > 0:
            # Other volumes still use these initiators; keep the zone.
            # return empty data
            data = {'driver_volume_type': 'fibre_channel', 'data': {}}
        else:
            # Last export gone: return the map so the zone manager can unzone.
            target_wwns, initiator_target_map = (
                self._build_initiator_target_map(itls, connector))
            data = {
                'driver_volume_type': 'fibre_channel',
                'data': {
                    'target_wwn': target_wwns,
                    'initiator_target_map': initiator_target_map}}
        LOG.debug('Return FC data: %s', data)
        return data

    def _build_initiator_target_map(self, itls, connector):
        # Map every initiator WWPN to the full list of target WWNs,
        # normalizing target ports to lowercase colon-free form.
        target_wwns = []
        for itl in itls:
            target_wwns.append(itl['target']['port'].replace(':', '').lower())

        initiator_wwns = connector['wwpns']
        initiator_target_map = {}
        for initiator in initiator_wwns:
            initiator_target_map[initiator] = target_wwns
        return target_wwns, initiator_target_map

    def _build_initport_list(self, connector):
        # CoprHD expects WWPNs as uppercase colon-separated byte pairs.
        init_ports = []
        for i in range(len(connector['wwpns'])):
            initiator_port = ':'.join(re.findall(
                '..',
                connector['wwpns'][i])).upper()  # Add ":" every two digits
            init_ports.append(initiator_port)
        return init_ports

    def get_volume_stats(self, refresh=False):
        """Get volume status.

        If 'refresh' is True, run update the stats first.
        """
        if refresh:
            self.update_volume_stats()

        return self._stats

    def update_volume_stats(self):
        """Retrieve stats info from virtual pool/virtual array."""
        LOG.debug("Updating volume stats")
        self._stats = self.common.update_volume_stats()

    def retype(self, ctxt, volume, new_type, diff, host):
        """Change the volume type."""
        return self.common.retype(ctxt, volume, new_type, diff, host)
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import re
import six
import json
import requests
import jsonpointer
from . import config
from . import exceptions
# Get descriptor base path
def get_descriptor_base_path(descriptor):
    """Return the base path for a string descriptor ('.' for anything else).

    Local file paths resolve to their containing directory; other
    strings are assumed to be URLs and keep their dirname.
    """
    # Non-string descriptors (dicts, file-likes) have no path of their own.
    if not isinstance(descriptor, six.string_types):
        return '.'
    if os.path.exists(descriptor):
        return os.path.dirname(os.path.abspath(descriptor))
    # suppose descriptor is a URL
    return os.path.dirname(descriptor)
# Retrieve descriptor
def retrieve_descriptor(descriptor):
    """Retrieve descriptor as a dict from a path/URL/file-like/dict source."""
    result = descriptor if descriptor is not None else {}

    # String source: a local JSON file or a URL.
    if isinstance(result, six.string_types):
        try:
            if os.path.isfile(result):
                with open(result, 'r') as source:
                    result = json.load(source)
            else:
                response = requests.get(result)
                response.raise_for_status()
                # Force UTF8 encoding for 'text/plain' sources
                response.encoding = 'utf8'
                result = response.json()
        except (IOError, requests.exceptions.RequestException) as error:
            message = 'Unable to load JSON at "%s"' % descriptor
            six.raise_from(exceptions.DataPackageException(message), error)
        except ValueError as error:
            # Python2 has no json.JSONDecodeError, so catch ValueError.
            message = 'Unable to parse JSON at "%s". %s' % (descriptor, error)
            six.raise_from(exceptions.DataPackageException(message), error)

    # File-like source: parse its contents as JSON.
    if hasattr(result, 'read'):
        try:
            result = json.load(result)
        except ValueError as e:
            six.raise_from(exceptions.DataPackageException(str(e)), e)

    if not isinstance(result, dict):
        msg = 'Data must be a \'dict\', but was a \'{0}\''
        raise exceptions.DataPackageException(msg.format(type(result).__name__))

    return result
# Dereference descriptor
def dereference_package_descriptor(descriptor, base_path):
    """Dereference data package descriptor (IN-PLACE FOR NOW).

    Resolves schema/dialect references in every resource, passing the
    package descriptor itself as the base for JSON Pointer lookups.
    """
    resources = descriptor.get('resources', [])
    for resource_descriptor in resources:
        dereference_resource_descriptor(resource_descriptor, base_path, descriptor)
    return descriptor
def dereference_resource_descriptor(descriptor, base_path, base_descriptor=None):
    """Dereference resource descriptor (IN-PLACE FOR NOW).

    Replaces string-valued 'schema'/'dialect' references with the objects
    they point to.  Reference kinds, tried in order: '#...' JSON Pointer
    into *base_descriptor*; remote HTTP(S) URL; local path relative to
    *base_path* (validated by is_safe_path).  *base_descriptor* defaults
    to *descriptor* itself.
    """
    PROPERTIES = ['schema', 'dialect']
    if base_descriptor is None:
        base_descriptor = descriptor
    for property in PROPERTIES:  # NOTE: 'property' shadows the builtin here
        value = descriptor.get(property)

        # URI -> No
        if not isinstance(value, six.string_types):
            continue

        # URI -> Pointer
        if value.startswith('#'):
            try:
                pointer = jsonpointer.JsonPointer(value[1:])
                descriptor[property] = pointer.resolve(base_descriptor)
            except Exception as error:
                message = 'Not resolved Pointer URI "%s" for resource.%s' % (value, property)
                six.raise_from(
                    exceptions.DataPackageException(message),
                    error
                )

        # URI -> Remote
        # Also taken when the *base* is remote: a relative value is then
        # joined onto the remote base path.
        elif base_path.startswith('http') or value.startswith('http'):
            try:
                fullpath = value
                if not value.startswith('http'):
                    fullpath = os.path.join(base_path, value)
                response = requests.get(fullpath)
                response.raise_for_status()
                descriptor[property] = response.json()
            except Exception as error:
                message = 'Not resolved Remote URI "%s" for resource.%s' % (value, property)
                six.raise_from(
                    exceptions.DataPackageException(message),
                    error
                )

        # URI -> Local
        else:
            # Reject absolute/escaping paths before touching the filesystem.
            if not is_safe_path(value):
                raise exceptions.DataPackageException(
                    'Not safe path in Local URI "%s" '
                    'for resource.%s' % (value, property))
            if not base_path:
                raise exceptions.DataPackageException(
                    'Local URI "%s" requires base path '
                    'for resource.%s' % (value, property))
            fullpath = os.path.join(base_path, value)
            try:
                with io.open(fullpath, encoding='utf-8') as file:
                    descriptor[property] = json.load(file)
            except Exception as error:
                message = 'Not resolved Local URI "%s" for resource.%s' % (value, property)
                six.raise_from(
                    exceptions.DataPackageException(message),
                    error
                )

    return descriptor
# Expand descriptor
def expand_package_descriptor(descriptor):
    """Apply defaults to data package descriptor (IN-PLACE FOR NOW).

    Sets the default package profile when none is present, then expands
    every resource descriptor in turn.
    """
    if 'profile' not in descriptor:
        descriptor['profile'] = config.DEFAULT_DATA_PACKAGE_PROFILE
    for resource in descriptor.get('resources', []):
        expand_resource_descriptor(resource)
    return descriptor
def expand_resource_descriptor(descriptor):
    """Apply defaults to resource descriptor (IN-PLACE FOR NOW).

    Non-tabular resources only get a default profile; tabular resources
    additionally get field/schema and dialect defaults.
    """
    descriptor.setdefault('profile', config.DEFAULT_RESOURCE_PROFILE)
    if descriptor['profile'] != 'tabular-data-resource':
        return descriptor

    # Schema defaults: per-field type/format plus missing values marker
    schema = descriptor.get('schema')
    if schema is not None:
        for field in schema.get('fields', []):
            field.setdefault('type', config.DEFAULT_FIELD_TYPE)
            field.setdefault('format', config.DEFAULT_FIELD_FORMAT)
        schema.setdefault('missingValues', config.DEFAULT_MISSING_VALUES)

    # Dialect defaults: fill in any missing CSV dialect keys
    dialect = descriptor.get('dialect')
    if dialect is not None:
        for key, value in config.DEFAULT_DIALECT.items():
            dialect.setdefault(key, value)

    return descriptor
# Miscellaneous
def ensure_dir(path):
    """Ensure the directory containing *path* exists, creating it if needed.

    A *path* without a directory component is a no-op.
    """
    parent = os.path.dirname(path)
    if not parent:
        return
    if not os.path.exists(parent):
        os.makedirs(parent)
def is_safe_path(path):
    """Check if path is safe and allowed.

    A path is rejected when it is absolute, traverses to a parent
    directory, starts with ``~``, or contains environment-variable
    references (POSIX ``$VAR`` or Windows ``%VAR%``).
    """
    if os.path.isabs(path):
        return False
    if ('..%s' % os.path.sep) in path:
        return False
    if path.startswith('~'):
        return False
    # expandvars changing the string means it referenced an env variable
    if os.path.expandvars(path) != path:
        return False
    if re.match(r'%.+%', path):
        return False
    if re.match(r'\$.+', path):
        return False
    return True
def extract_sha256_hash(hash):
    """Extract SHA256 hash or return None.

    Only the leading ``sha256:`` prefix is stripped. The previous
    implementation used ``str.replace``, which removed *every*
    occurrence of the prefix from the string, corrupting any digest
    that happened to contain it.
    """
    prefix = 'sha256:'
    if hash and hash.startswith(prefix):
        return hash[len(prefix):]
    return None
| |
import pytest
import numpy as np
from scipy import stats
from stingray.stats import *
@pytest.mark.parametrize('ntrial', [1, 10, 100, 1000, 10000, 100000])
def test_p_single_from_multi(ntrial):
    """Round-tripping a single-trial p-value through the multi-trial
    correction and its inverse should recover the original value."""
    p_single = 1e-8
    p_multi = p_multitrial_from_single_trial(p_single, ntrial)
    p_round_trip = p_single_trial_from_p_multitrial(p_multi, ntrial)
    assert np.isclose(p_round_trip, p_single, rtol=1e-2)
def test_p_single_from_multi_fails():
    """A multi-trial probability of 1 cannot be inverted: the function
    must warn and return NaN."""
    with pytest.warns(UserWarning) as record:
        p1 = p_single_trial_from_p_multitrial(1, 1000)
    messages = [r.message.args[0] for r in record]
    assert any("Multi-trial probability is very close to 1" in m
               for m in messages)
    assert np.isnan(p1)
def test_fold_detection_level():
    """Folded-profile detection level, with and without trial correction."""
    expected = 30.577914166892498
    np.testing.assert_almost_equal(fold_detection_level(16, 0.01), expected)

    # passing ntrial=2 must be equivalent to correcting the probability
    # for two trials and passing the corrected value
    corrected_eps = p_single_trial_from_p_multitrial(0.01, 2)
    level_with_ntrial = fold_detection_level(16, 0.01, ntrial=2)
    level_with_corrected = fold_detection_level(16, corrected_eps)
    np.testing.assert_almost_equal(level_with_ntrial, level_with_corrected)
def test_zn_detection_level():
    """Z^2_n detection level, with and without trial correction."""
    np.testing.assert_almost_equal(z2_n_detection_level(2), 13.276704135987625)

    # ntrial=2 must match passing the two-trial-corrected probability
    corrected_eps = p_single_trial_from_p_multitrial(0.01, 2)
    level_with_ntrial = z2_n_detection_level(4, 0.01, ntrial=2)
    level_with_corrected = z2_n_detection_level(4, corrected_eps)
    np.testing.assert_almost_equal(level_with_ntrial, level_with_corrected)
@pytest.mark.parametrize('ntrial', [1, 10, 100, 1000, 100000])
def test_fold_probability(ntrial):
    """Detection level and profile probability must be inverse operations."""
    detlev = fold_detection_level(16, 0.1, ntrial=ntrial)
    prob = fold_profile_probability(detlev, 16, ntrial=ntrial)
    np.testing.assert_almost_equal(prob, 0.1)
@pytest.mark.parametrize('ntrial', [1, 10, 100, 1000, 100000])
def test_zn_probability(ntrial):
    """Z^2_n detection level and probability must be inverse operations."""
    detlev = z2_n_detection_level(2, 0.1, ntrial=ntrial)
    prob = z2_n_probability(detlev, 2, ntrial=ntrial)
    np.testing.assert_almost_equal(prob, 0.1)
class TestClassicalSignificances(object):
    """Tests for ``classical_pvalue`` and related significance helpers.

    ``classical_pvalue`` is deprecated, so every call is wrapped in
    ``pytest.warns(DeprecationWarning)``; invalid inputs must raise
    ``ValueError`` in addition to warning.
    """

    def test_function_runs(self):
        """A valid (power, nspec) pair runs without raising."""
        power = 2.0
        nspec = 1.0
        with pytest.warns(DeprecationWarning):
            classical_pvalue(power, nspec)

    def test_power_is_not_infinite(self):
        power = np.inf
        nspec = 1
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_power_is_not_infinite2(self):
        power = -np.inf
        nspec = 1
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_power_is_non_nan(self):
        power = np.nan
        nspec = 1
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_power_is_positive(self):
        power = -2.0
        nspec = 1.0
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_nspec_is_not_infinite(self):
        power = 2.0
        nspec = np.inf
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_nspec_is_not_infinite2(self):
        power = 2.0
        nspec = -np.inf
        # Fix: this was the only invalid-input test missing the
        # DeprecationWarning context used by all of its siblings; the
        # unhandled warning could fail the suite under `-W error`.
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_nspec_is_not_nan(self):
        power = 2.0
        nspec = np.nan
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_nspec_is_positive(self):
        power = 2.0
        nspec = -1.0
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_nspec_is_nonzero(self):
        power = 2.0
        nspec = 0.0
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_nspec_is_an_integer_number(self):
        power = 2.0
        nspec = 2.5
        with pytest.warns(DeprecationWarning):
            with pytest.raises(ValueError):
                classical_pvalue(power, nspec)

    def test_nspec_float_type_okay(self):
        """An integral float nspec is accepted."""
        power = 2.0
        nspec = 2.0
        with pytest.warns(DeprecationWarning):
            classical_pvalue(power, nspec)

    def test_pvalue_decreases_with_increasing_power(self):
        power1 = 2.0
        power2 = 20.0
        nspec = 1.0
        with pytest.warns(DeprecationWarning):
            pval1 = classical_pvalue(power1, nspec)
        with pytest.warns(DeprecationWarning):
            pval2 = classical_pvalue(power2, nspec)
        assert pval1 - pval2 > 0.0

    def test_pvalue_must_decrease_with_increasing_nspec(self):
        power = 3.0
        nspec1 = 1.0
        nspec2 = 10.0
        with pytest.warns(DeprecationWarning):
            pval1 = classical_pvalue(power, nspec1)
        with pytest.warns(DeprecationWarning):
            pval2 = classical_pvalue(power, nspec2)
        assert pval1 - pval2 > 0.0

    def test_very_large_powers_produce_zero_prob(self):
        power = 31000.0
        nspec = 1
        with pytest.warns(DeprecationWarning):
            pval = classical_pvalue(power, nspec)
        assert np.isclose(pval, 0.0)

    def test_equivalent_Nsigma_logp(self):
        """Log p-values map to the expected Gaussian sigma equivalents."""
        pvalues = [0.15865525393145707, 0.0013498980316301035,
                   9.865877004244794e-10,
                   6.661338147750939e-16,
                   3.09e-138]
        log_pvalues = np.log(np.array(pvalues))
        sigmas = np.array([1, 3, 6, 8, 25])
        # Single number
        assert np.isclose(equivalent_gaussian_Nsigma_from_logp(log_pvalues[0]),
                          sigmas[0], atol=0.01)
        # Array
        assert np.allclose(equivalent_gaussian_Nsigma_from_logp(log_pvalues),
                           sigmas, atol=0.01)

    def test_chi2_logp(self):
        chi2 = 31
        # Test check on dof
        with pytest.raises(ValueError) as excinfo:
            chi2_logp(chi2, 1)
        message = str(excinfo.value)
        assert "The number of degrees of freedom cannot be < 2" in message

        # Test that approximate function works as expected. chi2 / dof > 15,
        # but small and safe number in order to compare to scipy.stats
        assert np.isclose(chi2_logp(chi2, 2), stats.chi2.logsf(chi2, 2),
                          atol=0.1)
        chi2 = np.array([5, 32])
        assert np.allclose(chi2_logp(chi2, 2), stats.chi2.logsf(chi2, 2),
                           atol=0.1)

    @pytest.mark.parametrize('nbin', [8, 16, 23, 72])
    def test_compare_fold_logprob_with_prob(self, nbin):
        """Log-probability must agree with log of the plain probability."""
        stat = np.random.uniform(5, 200, 5)
        logp = fold_profile_logprobability(stat, nbin)
        p = fold_profile_probability(stat, nbin)
        assert np.allclose(logp, np.log(p))

    @pytest.mark.parametrize('n', [2, 16, 23, 72])
    def test_compare_z2n_logprob_with_prob(self, n):
        """Z^2_n log-probability must agree with log of the probability."""
        stat = np.random.uniform(5, 200, 5)
        logp = z2_n_logprobability(stat, n=n)
        p = z2_n_probability(stat, n=n)
        assert np.allclose(logp, np.log(p))

    def test_power_upper_limit(self):
        # Use example from Vaughan+94
        assert np.isclose(power_upper_limit(40, 1, 0.99), 75, rtol=0.1)
| |
from __future__ import absolute_import, division
from collections import defaultdict
import numpy as np
import pandas as pd
from bokeh.charts import DEFAULT_PALETTE
from bokeh.core.enums import DashPattern
from bokeh.models.glyphs import Rect, Segment, Line, Patches, Arc
from bokeh.models.renderers import GlyphRenderer
from bokeh.core.properties import (Float, String, Datetime, Bool, Instance,
List, Either, Int, Enum, Color, Override, Any, Angle)
from .models import CompositeGlyph
from .properties import Column, EitherColumn
from .stats import (Stat, Quantile, Sum, Min, Max, Bins, stats, Histogram,
BinnedStat)
from .data_source import ChartDataSource
from .utils import marker_types, generate_patch_base, label_from_index_dict
class NestedCompositeGlyph(CompositeGlyph):
    """A composite glyph made up of other composite glyphs.

    Every `CompositeGlyph` must report the bounds of the glyph renderers
    it contains; this class derives those bounds from its children's
    bounds.
    """

    children = List(Instance(CompositeGlyph))

    @property
    def y_max(self):
        return max(child.y_max for child in self.children)

    @property
    def y_min(self):
        return min(child.y_min for child in self.children)

    @property
    def x_min(self):
        return min(child.x_min for child in self.children)

    @property
    def x_max(self):
        return max(child.x_max for child in self.children)
class XyGlyph(CompositeGlyph):
    """Composite glyph that plots in cartesian coordinates."""
    x = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))
    y = EitherColumn(String, Column(Float), Column(String), Column(Datetime), Column(Bool))

    def build_source(self):
        # when one dimension is missing, substitute stringified labels
        labels = self._build_label_array(('x', 'y'), self.label)
        str_labels = [str(label) for label in labels]

        if self.x is None:
            return dict(x_values=str_labels, y_values=self.y)
        if self.y is None:
            return dict(x_values=self.x, y_values=str_labels)
        return dict(x_values=self.x, y_values=self.y)

    def _build_label_array(self, props, value):
        # one label per data point, sized by whichever dimension is present
        for prop in props:
            data = getattr(self, prop)
            if data is not None:
                return [value] * len(data)

    # TODO(fpliger): since CompositeGlyphs are not exposed in general we
    #               should expect to always have a Series but in case
    #               it's not we just use the default min/max instead
    #               of just failing. When/If we end up exposing
    #               CompositeGlyphs we should consider making this
    #               more robust (either enforcing data or checking)
    @property
    def x_max(self):
        column = self.source.data['x_values']
        try:
            return column.max()
        except AttributeError:
            return max(column)

    @property
    def x_min(self):
        column = self.source.data['x_values']
        try:
            return column.min()
        except AttributeError:
            return min(column)

    @property
    def y_max(self):
        column = self.source.data['y_values']
        try:
            return column.max()
        except AttributeError:
            return max(column)

    @property
    def y_min(self):
        column = self.source.data['y_values']
        try:
            return column.min()
        except AttributeError:
            return min(column)
class PointGlyph(XyGlyph):
    """A set of glyphs placed in x,y coordinates with the same attributes."""
    fill_color = Override(default=DEFAULT_PALETTE[1])
    fill_alpha = Override(default=0.7)
    marker = String(default='circle')
    size = Float(default=8)

    def __init__(self, x=None, y=None, color=None, line_color=None, fill_color=None,
                 marker=None, size=None, **kwargs):
        kwargs['x'] = x
        kwargs['y'] = y

        # only forward marker/size when given so property defaults apply
        if marker is not None:
            kwargs['marker'] = marker
        if size is not None:
            kwargs['size'] = size

        # a single `color` overrides both line and fill colors
        if color:
            line_color = fill_color = color

        kwargs['line_color'] = line_color
        kwargs['fill_color'] = fill_color

        super(PointGlyph, self).__init__(**kwargs)
        self.setup()

    def get_glyph(self):
        """Resolve the marker name to its glyph class."""
        return marker_types[self.marker]

    def build_renderers(self):
        marker_cls = self.get_glyph()
        marker_glyph = marker_cls(x='x_values', y='y_values',
                                  line_color=self.line_color,
                                  fill_color=self.fill_color,
                                  size=self.size,
                                  fill_alpha=self.fill_alpha,
                                  line_alpha=self.line_alpha)
        yield GlyphRenderer(glyph=marker_glyph)
class LineGlyph(XyGlyph):
    """Represents a group of data as a line."""
    width = Int(default=2)
    dash = Enum(DashPattern, default='solid')

    def __init__(self, x=None, y=None, color=None, line_color=None,
                 width=None, dash=None, **kwargs):
        kwargs['x'] = x
        kwargs['y'] = y

        # `color` is a convenience alias for line_color
        if color is not None and line_color is None:
            line_color = color

        # only forward styling that was explicitly provided, so that the
        # property defaults remain in effect otherwise
        if dash is not None:
            kwargs['dash'] = dash
        if width is not None:
            kwargs['width'] = width
        if line_color is not None:
            kwargs['line_color'] = line_color

        super(LineGlyph, self).__init__(**kwargs)
        self.setup()

    def build_source(self):
        # a missing dimension is filled in from the other one's index
        if self.x is None:
            return dict(x_values=self.y.index, y_values=self.y)
        if self.y is None:
            return dict(x_values=self.x, y_values=self.x.index)
        return dict(x_values=self.x, y_values=self.y)

    def build_renderers(self):
        """Yield a `GlyphRenderer` for the group of data."""
        line = Line(x='x_values', y='y_values',
                    line_color=self.line_color,
                    line_alpha=self.line_alpha,
                    line_width=self.width,
                    line_dash=self.dash)
        yield GlyphRenderer(glyph=line)
class AreaGlyph(LineGlyph):
    """Represents a group of data as a filled area under a line.

    Supports stacking multiple areas on top of each other via
    ``__stack__``.
    """

    # ToDo: should these be added to composite glyph?
    stack = Bool(default=False)
    dodge = Bool(default=False)

    base = Float(default=0.0, help="""Lower bound of area.""")

    def __init__(self, **kwargs):
        line_color = kwargs.get('line_color', None)
        fill_color = kwargs.get('fill_color', None)
        color = kwargs.get('color', None)

        if color is not None:
            # apply color to line and fill
            kwargs['fill_color'] = color
            kwargs['line_color'] = color
        elif line_color is not None and fill_color is None:
            # apply line color to fill color by default
            kwargs['fill_color'] = line_color

        super(AreaGlyph, self).__init__(**kwargs)
        self.setup()

    def build_source(self):
        data = super(AreaGlyph, self).build_source()

        # wrap the line data into closed patch coordinates
        x0, y0 = generate_patch_base(pd.Series(list(data['x_values'])),
                                     pd.Series(list(data['y_values'])))

        data['x_values'] = [x0]
        data['y_values'] = [y0]

        return data

    def build_renderers(self):
        # parse all series. We exclude the first attr as it's the x values
        # added for the index
        glyph = Patches(
            xs='x_values', ys='y_values',
            fill_alpha=self.fill_alpha, fill_color=self.fill_color,
            line_color=self.line_color
        )
        renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
        yield renderer

    def __stack__(self, glyphs):
        """Shift each glyph's values so the areas stack on one another."""
        # ToDo: need to handle case of non-aligned indices, see pandas concat
        # ToDo: need to address how to aggregate on an index when required

        # build a list of series
        areas = []
        for glyph in glyphs:
            areas.append(pd.Series(glyph.source.data['y_values'][0],
                                   index=glyph.source.data['x_values'][0]))

        # concat the list of indexed y values into dataframe
        df = pd.concat(areas, axis=1)

        # calculate stacked values along the rows
        stacked_df = df.cumsum(axis=1)

        # lower bounds of each area series are diff between stacked and orig values
        lower_bounds = stacked_df - df

        # reverse the df so the patch is drawn in correct order
        lower_bounds = lower_bounds.iloc[::-1]

        # concat the upper and lower bounds together
        stacked_df = pd.concat([stacked_df, lower_bounds])

        # update the data in the glyphs
        for i, glyph in enumerate(glyphs):
            glyph.source.data['x_values'] = [stacked_df.index.values]
            # .iloc replaces the deprecated/removed DataFrame.ix; the
            # columns are the default RangeIndex here, so positional and
            # label-based selection coincide
            glyph.source.data['y_values'] = [stacked_df.iloc[:, i].values]

    def get_nested_extent(self, col, func):
        """Apply min/max over each per-patch array in the named column."""
        return [getattr(arr, func)() for arr in self.source.data[col]]

    @property
    def x_max(self):
        return max(self.get_nested_extent('x_values', 'max'))

    @property
    def x_min(self):
        return min(self.get_nested_extent('x_values', 'min'))

    @property
    def y_max(self):
        return max(self.get_nested_extent('y_values', 'max'))

    @property
    def y_min(self):
        return min(self.get_nested_extent('y_values', 'min'))
class HorizonGlyph(AreaGlyph):
    """An area glyph folded into bands to build a horizon chart series."""

    num_folds = Int(default=3, help="""The count of times the data is overlapped.""")

    series = Int(default=0, help="""The id of the series as the order it will appear,
    starting from 0.""")

    series_count = Int()

    fold_height = Float(help="""The height of one fold.""")

    bins = List(Float, help="""The binedges calculated from the number of folds,
    and the maximum value of the entire source data.""")

    graph_ratio = Float(help="""Scales heights of each series based on number of folds
    and the number of total series being plotted.
    """)

    pos_color = Color("#006400", help="""The color used for positive values.""")
    neg_color = Color("#6495ed", help="""The color used for negative values.""")

    flip_neg = Bool(default=True, help="""When True, the negative values will be
    plotted as their absolute value, then their individual axes is flipped. If False,
    then the negative values will still be taken as their absolute value, but the base
    of their shape will start from the same origin as the positive values.
    """)

    def __init__(self, bins=None, **kwargs):
        """Requires ``num_folds``, ``series`` and ``series_count`` in kwargs."""
        # fill alpha depends on how many folds will be layered
        kwargs['fill_alpha'] = 1.0/kwargs['num_folds']

        if bins is not None:
            kwargs['bins'] = bins

            # each series is shifted up to a synthetic y-axis
            kwargs['base'] = kwargs['series'] * max(bins) / kwargs['series_count']
            kwargs['graph_ratio'] = float(kwargs['num_folds'])/float(kwargs['series_count'])

        super(HorizonGlyph, self).__init__(**kwargs)

    def build_source(self):
        data = {}

        # Build columns for the positive values
        pos_y = self.y.copy()
        pos_y[pos_y < 0] = 0
        xs, ys = self._build_dims(self.x, pos_y)

        # list of positive colors and alphas
        colors = [self.pos_color] * len(ys)
        alphas = [(bin_idx * self.fill_alpha) for bin_idx in
                  range(0, len(self.bins))]

        # If we have negative values at all, add the values for those as well
        if self.y.min() < 0:
            neg_y = self.y.copy()
            neg_y[neg_y > 0] = 0
            neg_y = abs(neg_y)

            neg_xs, neg_ys = self._build_dims(self.x, neg_y, self.flip_neg)

            xs += neg_xs
            ys += neg_ys
            colors += ([self.neg_color] * len(neg_ys))
            alphas += alphas

        # create clipped representation of each band
        data['x_values'] = xs
        data['y_values'] = ys
        data['fill_color'] = colors
        # bug fix: this column previously received the color list; it must
        # carry the per-fold alpha values computed above (note the renderer
        # currently styles with the scalar self.fill_alpha, so this column
        # only matters to consumers of the data source)
        data['fill_alpha'] = alphas
        data['line_color'] = colors

        return data

    def _build_dims(self, x, y, flip=False):
        """ Creates values needed to plot each fold of the horizon glyph.

        Bins the data based on the binning passed into the glyph, then copies and clips
        the values for each bin.

        Args:
            x (`pandas.Series`): array of x values
            y (`pandas.Series`): array of y values
            flip (bool): whether to flip values, used when handling negative values

        Returns:
            tuple(list(`numpy.ndarray`), list(`numpy.ndarray`)): returns a list of
            arrays for the x values and list of arrays for the y values. The data
            has been folded and transformed so the patches glyph presents the data
            in a way that looks like an area chart.
        """
        # assign bins to each y value
        bin_idx = pd.cut(y, bins=self.bins, labels=False, include_lowest=True)

        xs, ys = [], []
        for idx, bin in enumerate(self.bins[0:-1]):

            # subtract off values associated with lower bins, to get into this bin
            temp_vals = y.copy() - (idx * self.fold_height)

            # clip the values between the fold range and zero
            temp_vals[bin_idx > idx] = self.fold_height * self.graph_ratio
            temp_vals[bin_idx < idx] = 0
            temp_vals[bin_idx == idx] = self.graph_ratio * temp_vals[bin_idx == idx]

            # if flipping, we must start the values from the top of each fold's range
            if flip:
                temp_vals = (self.fold_height * self.graph_ratio) - temp_vals
                base = self.base + (self.fold_height * self.graph_ratio)
            else:
                base = self.base

            # shift values up based on index of series
            temp_vals += self.base
            val_idx = temp_vals > 0
            if pd.Series.any(val_idx):
                ys.append(temp_vals)
                xs.append(x)

        # transform clipped data so it always starts and ends at its base value
        if len(ys) > 0:
            xs, ys = map(list, zip(*[generate_patch_base(xx, yy, base=base) for
                                     xx, yy in zip(xs, ys)]))

        return xs, ys

    def build_renderers(self):
        # parse all series. We exclude the first attr as it's the x values
        # added for the index
        glyph = Patches(
            xs='x_values', ys='y_values',
            fill_alpha=self.fill_alpha, fill_color='fill_color',
            line_color='line_color'
        )
        renderer = GlyphRenderer(data_source=self.source, glyph=glyph)
        yield renderer
class StepGlyph(LineGlyph):
    """Represents a group of data as a stepped line."""

    def build_source(self):
        """Build the stepped x/y columns.

        Every point except the last is duplicated so consecutive points
        are joined by horizontal-then-vertical step segments.
        """
        x = self.x
        y = self.y
        if self.x is None:
            x = self.y.index
        elif self.y is None:
            y = self.x.index

        # np.int was a deprecated alias of the builtin int and has been
        # removed from NumPy (>= 1.24); the builtin is the equivalent dtype
        dtype = x.dtype if hasattr(x, 'dtype') else int
        xs = np.empty(2*len(x)-1, dtype=dtype)
        xs[::2] = x[:]
        xs[1::2] = x[1:]

        dtype = y.dtype if hasattr(y, 'dtype') else np.float64
        ys = np.empty(2*len(y)-1, dtype=dtype)
        ys[::2] = y[:]
        ys[1::2] = y[:-1]

        data = dict(x_values=xs, y_values=ys)
        return data
class AggregateGlyph(NestedCompositeGlyph):
    """A base composite glyph for aggregating an array.

    Implements default stacking and dodging behavior that other composite
    glyphs can inherit.
    """

    x_label = String()
    x_label_value = Any()

    stack_label = String()
    stack_shift = Float(default=0.0)

    dodge_label = String(help="""Where on the scale the glyph should be placed.""")
    dodge_shift = Float(default=None)

    agg = Instance(Stat, default=Sum())

    span = Float(help="""The range of values represented by the aggregate.""")

    def __init__(self, x_label=None, **kwargs):
        label = kwargs.get('label')
        if x_label is not None:
            kwargs['x_label_value'] = x_label

            # the x_label property is a String; coerce other label types
            if not isinstance(x_label, str):
                x_label = str(x_label)

            kwargs['x_label'] = x_label
        elif label is not None:
            kwargs['x_label'] = str(label)

        super(AggregateGlyph, self).__init__(**kwargs)

    def get_dodge_label(self, shift=0.0):
        """Generate the label defining an offset in relation to a position on a scale."""
        # note: a third, unreachable branch (`shift_str = ''`) has been
        # removed -- the two conditions below cover every case
        if self.dodge_shift is None:
            shift_str = ':' + str(0.5 + shift)
        else:
            shift_str = ':' + str(self.dodge_shift + shift)

        return str(label_from_index_dict(self.x_label)) + shift_str

    def filter_glyphs(self, glyphs):
        """Return only the glyphs that are of the same class."""
        return [glyph for glyph in glyphs if isinstance(glyph, self.__class__)]

    @staticmethod
    def groupby(glyphs, prop):
        """Returns a dict of `CompositeGlyph`s, grouped by unique values of prop.

        For example, if all glyphs had a value of 'a' or 'b' for glyph.prop, the dict
        would contain two keys, 'a' and 'b', where each value is a list of the glyphs
        that had each of the values.
        """
        grouped = defaultdict(list)
        labels = [getattr(glyph, prop) for glyph in glyphs]

        # dict labels (e.g. multi-dimensional indices) are unhashable;
        # use a tuple of their values as the grouping key instead
        labels = [tuple(label.values()) if isinstance(label, dict) else label
                  for label in labels]

        # plain loop instead of a side-effect-only list comprehension
        for label, glyph in zip(labels, glyphs):
            grouped[label].append(glyph)

        labels = pd.Series(labels).drop_duplicates().values
        return labels, grouped

    def __stack__(self, glyphs):
        """Apply relative shifts to the composite glyphs for stacking."""
        filtered_glyphs = self.filter_glyphs(glyphs)
        labels, grouped = self.groupby(filtered_glyphs, 'x_label')

        for label in labels:
            group = grouped[label]

            # separate the negative and positive aggregates into separate groups
            neg_group = [glyph for glyph in group if glyph.span < 0]
            pos_group = [glyph for glyph in group if glyph.span >= 0]

            # apply stacking to each group separately
            for group in [neg_group, pos_group]:
                shift = []
                for i, glyph in enumerate(group):
                    # save off the top of each rect's height
                    shift.append(glyph.span)
                    if i > 0:
                        glyph.stack_shift = sum(shift[0:i])
                        glyph.refresh()

    def __dodge__(self, glyphs):
        """Apply relative shifts to the composite glyphs for dodging."""
        if self.dodge_label is not None:
            filtered_glyphs = self.filter_glyphs(glyphs)
            labels, grouped = self.groupby(filtered_glyphs, 'dodge_label')

            # calculate transformations
            step = np.linspace(0, 1.0, len(grouped.keys()) + 1, endpoint=False)
            width = min(0.2, (1. / len(grouped.keys())) ** 1.1)

            # set bar attributes and re-aggregate
            for i, label in enumerate(labels):
                group = grouped[label]
                for glyph in group:
                    glyph.dodge_shift = step[i + 1]
                    glyph.width = width
                    glyph.refresh()
class Interval(AggregateGlyph):
    """A rectangle representing aggregated values.

    The interval is a rect glyph where two of the parallel sides represent a
    summary of values. Each of the two sides is derived from a separate aggregation of
    the values provided to the interval.

    .. note::
        A bar is a special case interval where one side is pinned and used to
        communicate a value relative to it.
    """

    width = Float(default=0.8)

    # start/end stats may be Stat instances or string keys into the
    # `stats` registry; they are resolved lazily in get_start/get_end
    start_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Min(), help="""
        The stat used to derive the starting point of the composite glyph.""")
    end_agg = Either(Instance(Stat), Enum(*list(stats.keys())), default=Max(), help="""
        The stat used to derive the end point of the composite glyph.""")

    start = Float(default=0.0)
    end = Float()

    def __init__(self, label, values, **kwargs):
        kwargs['label'] = label
        kwargs['values'] = values

        super(Interval, self).__init__(**kwargs)
        self.setup()

    def get_start(self):
        """Get the value for the start of the glyph."""
        # assumes self.values is a pandas Series (it exposes .index) --
        # TODO confirm against callers
        if len(self.values.index) == 1:
            # a single value cannot be meaningfully aggregated; use it directly
            self.start_agg = None
            return self.values[0]
        elif isinstance(self.start_agg, str):
            # resolve a registry key (e.g. 'min') to a Stat instance
            self.start_agg = stats[self.start_agg]()

        self.start_agg.set_data(self.values)
        return self.start_agg.value

    def get_end(self):
        """Get the value for the end of the glyph."""
        if isinstance(self.end_agg, str):
            # resolve a registry key (e.g. 'max') to a Stat instance
            self.end_agg = stats[self.end_agg]()

        self.end_agg.set_data(self.values)
        return self.end_agg.value

    def get_span(self):
        """The total range between the start and end."""
        return self.end - self.start

    def build_source(self):
        # ToDo: Handle rotation
        self.start = self.get_start()
        self.end = self.get_end()
        self.span = self.get_span()

        width = [self.width]
        if self.dodge_shift is not None:
            # dodged glyphs are positioned via a synthetic offset label
            x = [self.get_dodge_label()]
        else:
            x = [self.x_label]
        height = [self.span]
        # the rect's y is its center: stack offset + half the span above start
        y = [self.stack_shift + (self.span / 2.0) + self.start]
        color = [self.color]
        fill_alpha = [self.fill_alpha]
        line_color = [self.line_color]
        line_alpha = [self.line_alpha]
        label = [self.label]

        return dict(x=x, y=y, width=width, height=height, color=color,
                    fill_alpha=fill_alpha, line_color=line_color,
                    line_alpha=line_alpha, label=label)

    @property
    def x_max(self):
        """The maximum extent of the glyph in x.

        .. note::
            Dodging the glyph can affect the value.
        """
        return (self.dodge_shift or self.x_label_value) + (self.width / 2.0)

    @property
    def x_min(self):
        """The minimum extent of the glyph in x.

        .. note::
            Dodging the glyph can affect the value.
        """
        return (self.dodge_shift or self.x_label_value) - (self.width / 2.0)

    @property
    def y_max(self):
        """Maximum extent of all `Glyph`s.

        How much we are stacking + the height of the interval + the base of the interval

        .. note::
            the start and end of the glyph can swap between being associated with the
            min and max when the glyph end represents a negative value.
        """
        return max(self.bottom, self.top)

    @property
    def y_min(self):
        """The minimum extent of all `Glyph`s in y.

        .. note::
            the start and end of the glyph can swap between being associated with the
            min and max when the glyph end represents a negative value.
        """
        return min(self.bottom, self.top)

    @property
    def bottom(self):
        """The value associated with the start of the stacked glyph."""
        return self.stack_shift + self.start

    @property
    def top(self):
        """The value associated with the end of the stacked glyph."""
        return self.stack_shift + self.span + self.start

    def build_renderers(self):
        """Yields a `GlyphRenderer` associated with a `Rect` glyph."""
        glyph = Rect(x='x', y='y', width='width', height='height', fill_color='color',
                     fill_alpha='fill_alpha', line_color='line_color')
        yield GlyphRenderer(glyph=glyph)
class BarGlyph(Interval):
    """Special case of Interval where the span represents a value.

    A bar always begins from 0, or the value that is being compared to, and
    extends to some positive or negative value.
    """

    def __init__(self, label, values, agg='sum', **kwargs):
        kwargs.update(end_agg=agg, start_agg=None)
        super(BarGlyph, self).__init__(label, values, **kwargs)
        self.setup()

    def get_start(self):
        # bars always grow from zero
        return 0.0
class DotGlyph(Interval):
    """Special case of Interval where the span represents a value.

    The aggregated value is rendered as a single marker at the end of the
    span (optionally with a stem drawn back to the axis) rather than as a
    filled bar. Like a bar, the span always begins from 0, or the value
    that is being compared to, and extends to some positive or negative
    value.
    """
    marker = String(default='circle')
    size = Float(default=8)
    stem = Bool(False, help="""
    Whether to draw a stem from each do to the axis.
    """)
    stem_line_width = Float(default=1)
    stem_color = String(default='black')

    def __init__(self, label, values, agg='sum', **kwargs):
        kwargs['end_agg'] = agg
        super(DotGlyph, self).__init__(label, values, **kwargs)
        self.setup()

    def get_start(self):
        # dots are measured from zero, like bars
        return 0.0

    def get_glyph(self):
        """Resolve the marker name to its glyph class."""
        return marker_types[self.marker]

    def build_renderers(self):
        if self.stem:
            # optional stem segment from the axis (y=0) up to the dot
            yield GlyphRenderer(glyph=Segment(
                x0='x', y0=0, x1='x', y1='height',
                line_width=self.stem_line_width,
                line_color=self.stem_color,
                line_alpha='fill_alpha')
            )

        glyph_type = self.get_glyph()
        glyph = glyph_type(x='x', y='height',
                           line_color=self.line_color,
                           fill_color=self.color,
                           size=self.size,
                           fill_alpha='fill_alpha',
                           line_alpha='line_alpha'
                           )
        yield GlyphRenderer(glyph=glyph)
class QuartileGlyph(Interval):
    """An interval whose two sides are quantile aggregations of the values."""

    def __init__(self, label, values, interval1, interval2, **kwargs):
        kwargs.update(label=label,
                      values=values,
                      start_agg=Quantile(interval=interval1),
                      end_agg=Quantile(interval=interval2))
        super(QuartileGlyph, self).__init__(**kwargs)
        self.setup()
class BoxGlyph(AggregateGlyph):
"""Summarizes the distribution with a collection of glyphs.
A box glyph produces one "box" for a given array of vales. The box
is made up of multiple other child composite glyphs (intervals,
scatter) and directly produces glyph renderers for the whiskers,
as well.
"""
q1 = Float(help="""Derived value for 25% of all values.""")
q2 = Float(help="""Derived value for 50% of all values.""")
q3 = Float(help="""Derived value for 75% of all values.""")
iqr = Float()
w0 = Float(help='Lower whisker')
w1 = Float(help='Upper whisker')
q2_glyph = Instance(QuartileGlyph)
q3_glyph = Instance(QuartileGlyph)
whisker_glyph = Instance(GlyphRenderer)
outliers = Either(Bool, Instance(PointGlyph))
marker = String(default='circle')
whisker_width = Float(default=0.3)
whisker_line_width = Float(default=2)
whisker_span_line_width = Float(default=2)
whisker_color = String(default='black')
outlier_fill_color = String(default='red')
outlier_line_color = String(default='red')
outlier_size = Float(default=5)
bar_color = String(default='DimGrey')
def __init__(self, label, values, outliers=True, **kwargs):
width = kwargs.pop('width', None)
bar_color = kwargs.pop('color', None) or kwargs.get('bar_color', None) or self.lookup('bar_color').class_default()
kwargs['outliers'] = kwargs.pop('outliers', None) or outliers
kwargs['label'] = label
kwargs['values'] = values
x_label = kwargs.get('x_label')
kwargs['q2_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.25, interval2=0.5, width=width,
color=bar_color)
kwargs['q3_glyph'] = QuartileGlyph(label=label, x_label=x_label, values=values,
interval1=0.5, interval2=0.75, width=width,
color=bar_color)
super(BoxGlyph, self).__init__(**kwargs)
self.setup()
def build_renderers(self):
"""Yields all renderers that make up the BoxGlyph."""
self.calc_quartiles()
outlier_values = self.values[((self.values < self.w0) | (self.values > self.w1))]
self.whisker_glyph = GlyphRenderer(glyph=Segment(x0='x0s', y0='y0s', x1='x1s', y1='y1s',
line_width=self.whisker_line_width,
line_color=self.whisker_color))
if len(outlier_values) > 0 and self.outliers:
self.outliers = PointGlyph(label=self.label, y=outlier_values,
x=[self.get_dodge_label()] * len(outlier_values),
line_color=self.outlier_line_color,
fill_color=self.outlier_fill_color,
size=self.outlier_size, marker=self.marker)
for comp_glyph in self.composite_glyphs:
for renderer in comp_glyph.renderers:
yield renderer
yield self.whisker_glyph
def calc_quartiles(self):
"""Sets all derived stat properties of the BoxGlyph."""
self.q1 = self.q2_glyph.start
self.q2 = self.q2_glyph.end
self.q3 = self.q3_glyph.end
self.iqr = self.q3 - self.q1
mx = Max()
mx.set_data(self.values)
mn = Min()
mn.set_data(self.values)
self.w0 = max(self.q1 - (1.5 * self.iqr), mn.value)
self.w1 = min(self.q3 + (1.5 * self.iqr), mx.value)
def build_source(self):
"""Calculate stats and builds and returns source for whiskers."""
self.calc_quartiles()
x_label = self.get_dodge_label()
x_w0_label = self.get_dodge_label(shift=(self.whisker_width / 2.0))
x_w1_label = self.get_dodge_label(shift=-(self.whisker_width / 2.0))
# span0, whisker bar0, span1, whisker bar1
x0s = [x_label, x_w0_label, x_label, x_w0_label]
y0s = [self.w0, self.w0, self.q3, self.w1]
x1s = [x_label, x_w1_label, x_label, x_w1_label]
y1s = [self.q1, self.w0, self.w1, self.w1]
return dict(x0s=x0s, y0s=y0s, x1s=x1s, y1s=y1s)
    def _set_sources(self):
        """Attach the box's shared ColumnDataSource to the whisker renderer.

        The composite quartile/outlier glyphs manage their own sources;
        only the whisker segments read from ``self.source``.
        """
        self.whisker_glyph.data_source = self.source
    def get_extent(self, func, prop_name):
        # Reduce the named extent property (e.g. 'x_max') across all
        # composite glyphs with the supplied aggregate (min/max).
        return func([getattr(renderer, prop_name) for renderer in self.composite_glyphs])
@property
def composite_glyphs(self):
"""Returns list of composite glyphs, excluding the regular glyph renderers."""
comp_glyphs = [self.q2_glyph, self.q3_glyph]
if isinstance(self.outliers, PointGlyph):
comp_glyphs.append(self.outliers)
return comp_glyphs
    @property
    def x_max(self):
        # Widest composite extent plus horizontal padding.
        return self.get_extent(max, 'x_max') + self.right_buffer

    @property
    def x_min(self):
        return self.get_extent(min, 'x_min') - self.left_buffer

    @property
    def y_max(self):
        # Whiskers may extend beyond the composite glyphs, so include w1.
        return max(self.w1, self.get_extent(max, 'y_max')) + self.top_buffer

    @property
    def y_min(self):
        # Likewise, the lower whisker bound may undercut the glyphs.
        return min(self.w0, self.get_extent(min, 'y_min')) - self.bottom_buffer
class HistogramGlyph(AggregateGlyph):
    """Depicts the distribution of values using rectangles created by binning.

    The histogram represents a distribution, so will likely include other
    options for displaying it, such as KDE and cumulative density.
    """

    # Derived models: populated by build_renderers(), not by the caller.
    bins = Instance(BinnedStat, help="""A stat used to calculate the bins. The bins stat
        includes attributes about each composite bin.""")
    bars = List(Instance(BarGlyph), help="""The histogram is comprised of many
        BarGlyphs that are derived from the values.""")
    density = Bool(False, help="""
    Whether to normalize the histogram.
    If True, the result is the value of the probability *density* function
    at the bin, normalized such that the *integral* over the range is 1. If
    False, the result will contain the number of samples in each bin.
    For more info check :class:`~bokeh.charts.stats.Histogram` documentation.
    (default: False)
    """)

    def __init__(self, values, label=None, color=None, bins=None, **kwargs):
        """Build a histogram glyph from raw values.

        Args:
            values: the raw values to be binned.
            label: optional grouping label forwarded to the base glyph.
            color: optional bar color.
            bins: optional bin specification, delegated to the Histogram stat.
        """
        if label is not None:
            kwargs['label'] = label
        kwargs['values'] = values
        if color is not None:
            kwargs['color'] = color

        # remove width, since this is handled automatically
        kwargs.pop('width', None)

        # keep original bins setting private since it just needs to be
        # delegated to the Histogram stat
        self._bins = bins

        super(HistogramGlyph, self).__init__(**kwargs)
        self.setup()

    def _set_sources(self):
        # No need to set sources, since composite glyphs handle this
        pass

    def build_source(self):
        # No need to build source, since composite glyphs handle this
        return None

    def build_renderers(self):
        """Yield a bar glyph for each bin."""
        # TODO(fpliger): We should expose the bin stat class so we could let
        #               users specify bins other than the Histogram stat.
        self.bins = Histogram(values=self.values, bins=self._bins,
                              density=self.density)
        bars = []
        for bin in self.bins.bins:
            bars.append(BarGlyph(label=bin.label[0], x_label=bin.center,
                                 values=bin.values, color=self.color,
                                 fill_alpha=self.fill_alpha,
                                 agg=bin.stat, width=bin.width))

        # provide access to bars as children for bounds properties
        self.bars = self.children = bars

        for comp_glyph in self.bars:
            for renderer in comp_glyph.renderers:
                yield renderer

    @property
    def y_min(self):
        # Histogram bars always rest on the axis.
        return 0.0
class BinGlyph(XyGlyph):
    """Represents a group of data that was aggregated and is represented by a glyph.
    """

    # Stat model describing the binning applied to the values.
    bins = Instance(Bins)
    # Source column name and aggregation used to produce `values`.
    column = String()
    stat = String()

    # Key into the `glyphs` mapping selecting which glyph class to draw,
    # plus the size of each bin rectangle in data units.
    glyph_name = String()
    width = Float()
    height = Float()

    def __init__(self, x, y, values, column=None, stat='count', glyph='rect', width=1,
                 height=1, **kwargs):
        # Collapse duplicate (x, y, value) triples before plotting.
        df = pd.DataFrame(dict(x_vals=x, y_vals=y, values_vals=values))
        df.drop_duplicates(inplace=True)

        kwargs['x'] = df.x_vals
        kwargs['y'] = df.y_vals
        kwargs['values'] = df.values_vals
        kwargs['column'] = column
        kwargs['stat'] = stat
        kwargs['glyph_name'] = glyph
        kwargs['height'] = height
        kwargs['width'] = width
        if 'glyphs' not in kwargs:
            kwargs['glyphs'] = {'rect': Rect}

        # NOTE(review): super(XyGlyph, self) skips XyGlyph.__init__ and goes
        # straight to its base class — looks deliberate, but confirm before
        # changing it to super(BinGlyph, self).
        super(XyGlyph, self).__init__(**kwargs)
        self.setup()

    def build_source(self):
        # One column per coordinate plus the aggregated value.
        return {'x': self.x, 'y': self.y, 'values': self.values}

    def build_renderers(self):
        # Look up the configured glyph class (e.g. Rect) and bind it to the
        # source columns; `dilate=True` avoids hairline gaps between bins.
        glyph_class = self.glyphs[self.glyph_name]
        glyph = glyph_class(x='x', y='y', height=self.height, width=self.width,
                            fill_color=self.fill_color, line_color=self.line_color,
                            dilate=True)
        yield GlyphRenderer(glyph=glyph)

    # The data extents are padded by half a bin so edge bins are not clipped.
    @property
    def x_max(self):
        return self.get_data_range('x')[1] + self.width / 2.0

    @property
    def x_min(self):
        return self.get_data_range('x')[0] - self.width / 2.0

    @property
    def y_max(self):
        return self.get_data_range('y')[1] + self.height / 2.0

    @property
    def y_min(self):
        return self.get_data_range('y')[0] - self.height / 2.0

    def get_data_range(self, col):
        # Numeric columns use their true min/max; categorical columns are
        # positioned on integer slots 1..n over the unique values.
        data = self.source.data[col]
        if ChartDataSource.is_number(data):
            return min(data), max(data)
        else:
            return 1, len(data.drop_duplicates())
class ArcGlyph(LineGlyph):
    """Represents a group of data as an arc."""

    start_angle = Angle()
    end_angle = Angle()

    def __init__(self, **kwargs):
        # BUG FIX: `super(self.__class__, self)` resolves to ArcGlyph's
        # parent even when called from a subclass instance, which causes
        # infinite recursion if this class is ever subclassed. Name the
        # class explicitly instead.
        super(ArcGlyph, self).__init__(**kwargs)
        self.setup()

    def build_renderers(self):
        """Yield a `GlyphRenderer` for the group of data."""
        # NOTE(review): the angle columns look swapped ('_end_angle' feeds
        # start_angle and vice versa) — presumably intentional for the draw
        # direction; confirm before changing.
        glyph = Arc(x='x', y='y', radius=1,
                    start_angle='_end_angle',
                    end_angle='_start_angle',
                    line_color='line_color')
        yield GlyphRenderer(glyph=glyph)
| |
import json
import os
import uuid
from typing import Any, Dict, List, Optional
import click
from chaoslib import __version__ as chaoslib_version
from chaoslib import convert_vars, merge_vars
from chaoslib.control import load_global_controls
from chaoslib.discovery import discover as disco
from chaoslib.discovery.discover import portable_type_name_to_python_type
from chaoslib.exceptions import ChaosException, DiscoveryFailed, InvalidSource
from chaoslib.experiment import ensure_experiment_is_valid, run_experiment
from chaoslib.info import list_extensions
from chaoslib.loader import load_experiment
from chaoslib.notification import (
DiscoverFlowEvent,
InitFlowEvent,
RunFlowEvent,
ValidateFlowEvent,
notify,
)
from chaoslib.settings import (
CHAOSTOOLKIT_CONFIG_PATH,
load_settings,
locate_settings_entry,
save_settings,
)
from chaoslib.types import Activity, Discovery, Dry, Experiment, Journal, Schedule
from click_plugins import with_plugins
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
import yaml
from logzero import logger
from chaostoolkit import __version__, encoder
from chaostoolkit.check import check_hypothesis_strategy_spelling, check_newer_version
from chaostoolkit.logging import configure_logger
__all__ = ["cli"]
@click.group()
@click.version_option(version=__version__)
@click.option("--verbose", is_flag=True, help="Display debug level traces.")
@click.option(
    "--no-version-check",
    is_flag=True,
    help="Do not search for an updated version of the chaostoolkit.",
)
@click.option("--change-dir", help="Change directory before running experiment.")
@click.option("--no-log-file", is_flag=True, help="Disable logging to file entirely.")
@click.option(
    "--log-file",
    default="chaostoolkit.log",
    show_default=True,
    help="File path where to write the command's log.",
)
@click.option(
    "--log-file-level",
    default="debug",
    show_default=False,
    help="File logging level: debug, info, warning, error",
    type=click.Choice(["debug", "info", "warning", "error"]),
)
@click.option(
    "--log-format",
    default="string",
    show_default=False,
    help="Console logging format: string, json.",
    type=click.Choice(["string", "json"]),
)
@click.option(
    "--settings",
    default=CHAOSTOOLKIT_CONFIG_PATH,
    show_default=True,
    help="Path to the settings file.",
)
@click.pass_context
def cli(
    ctx: click.Context,
    verbose: bool = False,
    no_version_check: bool = False,
    change_dir: str = None,
    no_log_file: bool = False,
    log_file: str = "chaostoolkit.log",
    log_file_level: str = "info",
    log_format: str = "string",
    settings: str = CHAOSTOOLKIT_CONFIG_PATH,
):
    # Assemble the logger configuration once; file-related settings are
    # only forwarded when file logging is enabled.
    log_kwargs = {
        "verbose": verbose,
        "log_format": log_format,
        "context_id": str(uuid.uuid4()),
    }
    if not no_log_file:
        log_kwargs["log_file"] = log_file
        log_kwargs["log_file_level"] = log_file_level
    configure_logger(**log_kwargs)

    subcommand = ctx.invoked_subcommand

    # make it nicer for going through the log file
    logger.debug("#" * 79)
    logger.debug(f"Running command '{subcommand}'")

    ctx.obj = {}
    ctx.obj["settings_path"] = click.format_filename(settings)
    logger.debug("Using settings file '{}'".format(ctx.obj["settings_path"]))

    if not no_version_check:
        check_newer_version(command=subcommand)

    if change_dir:
        logger.warning(f"Moving to {change_dir}")
        os.chdir(change_dir)
def validate_vars(
    ctx: click.Context, param: click.Option, value: List[str]
) -> Dict[str, Any]:
    """
    Process all `--var key=value` and return a dictionary of them with the
    value converted to the appropriate type.
    """
    # Delegate parsing/typing to chaoslib; surface bad input as a click
    # parameter error so the CLI reports it nicely.
    try:
        parsed = convert_vars(value)
    except ValueError as err:
        raise click.BadParameter(str(err))
    return parsed
@cli.command()
@click.option(
    "--journal-path",
    default="./journal.json",
    help="Path where to save the journal from the execution.",
)
@click.option(
    "--dry",
    type=click.Choice(["probes", "actions", "activities", "pause"]),
    show_default=False,
    help="Run the experiment without executing the chosen strategy.",
)
@click.option(
    "--no-validation",
    is_flag=True,
    help="Do not validate the experiment before running.",
)
@click.option("--no-verify-tls", is_flag=True, help="Do not verify TLS certificate.")
@click.option(
    "--rollback-strategy",
    default="default",
    show_default=False,
    help="Rollback runtime strategy. Default is to never play them "
    "on interruption or failed hypothesis.",
    type=click.Choice(["default", "always", "never", "deviated"]),
)
@click.option(
    "--var",
    multiple=True,
    callback=validate_vars,
    help="Specify substitution values for configuration only. Can "
    "be provided multiple times. The pattern must be "
    "key=value or key:type=value. In that latter case, the "
    "value will be casted as the specified type. Supported "
    "types are: int, float, bytes. No type specified means "
    "a utf-8 decoded string.",
)
@click.option(
    "--var-file",
    multiple=True,
    type=click.Path(exists=True),
    help="Specify files that contain configuration and secret "
    "substitution values. Either as a json/yaml payload where "
    "each key has a value mapping to a configuration entry. "
    "Or a .env file defining environment variables. "
    "Can be provided multiple times.",
)
@click.option(
    "--hypothesis-strategy",
    default="default",
    type=click.Choice(
        [
            "default",
            "before-method-only",
            "after-method-only",
            "during-method-only",
            "continuously",
            # historical misspelling kept for backward compatibility; it is
            # normalized by check_hypothesis_strategy_spelling() below
            "continously",
        ],
        case_sensitive=True,
    ),
    help="Strategy to execute the hypothesis during the run.",
)
@click.option(
    "--hypothesis-frequency",
    default=1.0,
    type=float,
    help="Pace at which running the hypothesis. "
    "Only applies when strategy is either: "
    "during-method-only or continuously",
)
@click.option(
    "--fail-fast",
    is_flag=True,
    default=False,
    help="When running in the during-method-only or continuous "
    "strategies, indicate the hypothesis can fail the "
    "experiment as soon as it deviates once. Otherwise, keeps "
    "running until the end of the experiment.",
)
@click.argument("source")
@click.pass_context
def run(
    ctx: click.Context,
    source: str,
    journal_path: str = "./journal.json",
    dry: Optional[str] = None,
    no_validation: bool = False,
    no_exit: bool = False,
    no_verify_tls: bool = False,
    rollback_strategy: str = "default",
    var: Dict[str, Any] = None,
    var_file: List[str] = None,
    hypothesis_strategy: str = "default",
    hypothesis_frequency: float = 1.0,
    fail_fast: bool = False,
) -> Journal:
    """Run the experiment loaded from SOURCE, either a local file or a
    HTTP resource. SOURCE can be formatted as JSON or YAML."""
    # NOTE(review): `no_exit` has no matching @click.option, so it is always
    # False when invoked from the CLI — presumably kept for programmatic
    # callers; confirm before removing.
    settings = load_settings(ctx.obj["settings_path"]) or {}
    has_deviated = False
    has_failed = False

    # Merge --var and --var-file inputs into one substitution mapping.
    experiment_vars = merge_vars(var, var_file)

    load_global_controls(settings)
    try:
        experiment = load_experiment(source, settings, verify_tls=not no_verify_tls)
    except InvalidSource as x:
        logger.error(str(x))
        logger.debug(x)
        ctx.exit(1)

    notify(settings, RunFlowEvent.RunStarted, experiment)

    if not no_validation:
        try:
            ensure_experiment_is_valid(experiment)
        except ChaosException as x:
            logger.error(str(x))
            logger.debug(x)
            ctx.exit(1)

    # Convert the --dry choice into the enum the runtime expects.
    experiment["dry"] = Dry.from_string(dry)

    # Only set the rollback strategy when the settings do not define one.
    settings.setdefault("runtime", {}).setdefault("rollbacks", {}).setdefault(
        "strategy", rollback_strategy
    )

    # Normalizes the historical "continously" misspelling.
    hypothesis_strategy = check_hypothesis_strategy_spelling(hypothesis_strategy)

    schedule = Schedule(
        continuous_hypothesis_frequency=hypothesis_frequency, fail_fast=fail_fast
    )

    journal = run_experiment(
        experiment,
        settings=settings,
        strategy=hypothesis_strategy,
        schedule=schedule,
        experiment_vars=experiment_vars,
    )
    has_deviated = journal.get("deviated", False)
    has_failed = journal["status"] != "completed"

    # Persist the raw --dry string (not the enum) so the journal stays
    # JSON-serializable.
    if "dry" in journal["experiment"]:
        journal["experiment"]["dry"] = dry
    with open(journal_path, "w") as r:
        json.dump(journal, r, indent=2, ensure_ascii=False, default=encoder)

    if journal["status"] == "completed":
        notify(settings, RunFlowEvent.RunCompleted, journal)
    elif has_failed:
        notify(settings, RunFlowEvent.RunFailed, journal)

    if has_deviated:
        # A deviated run may also be "completed"; notify separately.
        notify(settings, RunFlowEvent.RunDeviated, journal)

    if (has_failed or has_deviated) and not no_exit:
        ctx.exit(1)

    return journal
@cli.command()
@click.option("--no-verify-tls", is_flag=True, help="Do not verify TLS certificate.")
@click.argument("source")
@click.pass_context
def validate(
    ctx: click.Context, source: str, no_verify_tls: bool = False
) -> Experiment:
    """Validate the experiment at SOURCE."""
    settings = load_settings(ctx.obj["settings_path"])

    # Loading failures abort before any validation notification is sent.
    try:
        experiment = load_experiment(source, settings, verify_tls=not no_verify_tls)
    except InvalidSource as err:
        logger.error(str(err))
        logger.debug(err)
        ctx.exit(1)

    try:
        notify(settings, ValidateFlowEvent.ValidateStarted, experiment)
        ensure_experiment_is_valid(experiment)
        notify(settings, ValidateFlowEvent.ValidateCompleted, experiment)
        logger.info("experiment syntax and semantic look valid")
    except ChaosException as err:
        notify(settings, ValidateFlowEvent.ValidateFailed, experiment, err)
        logger.error(str(err))
        logger.debug(err)
        ctx.exit(1)

    return experiment
@cli.group()
def settings():
    """
    Read, write or remove from your settings file.
    """
    pass


# NOTE(review): @cli.group() already registers the group on `cli`; this
# explicit add_command is redundant but harmless.
cli.add_command(settings)
@settings.command("show")
@click.option(
    "--format",
    "fmt",
    default="yaml",
    show_default=False,
    help="Output format.",
    type=click.Choice(["json", "yaml"]),
)
@click.pass_context
def show_settings(ctx: click.Context, fmt: str = "json"):
    """
    Show the entire content of the settings file.
    Be aware this will not obfuscate secret data.
    """
    if not os.path.isfile(ctx.obj["settings_path"]):
        # BUG FIX: `click.abort` does not exist — the previous code raised
        # AttributeError whenever the settings file was missing. Raise the
        # proper click error so the message is shown and the command exits
        # non-zero.
        raise click.ClickException(
            "No settings file found at {}".format(ctx.obj["settings_path"])
        )

    settings = load_settings(ctx.obj["settings_path"]) or {}

    # Render in the requested format; click.Choice guarantees json or yaml.
    if fmt == "json":
        click.echo(json.dumps(settings, indent=2))
    elif fmt == "yaml":
        click.echo(yaml.dump(settings, indent=2))


settings.add_command(show_settings)
@settings.command("set")
@click.argument("key", nargs=1)
@click.argument("value", nargs=1)
@click.pass_context
def set_settings_value(ctx: click.Context, key: str, value: str = None):
    """
    Set a settings value.
    The value must be a valid JSON string so that it can be interpreted
    with the appropriate type.
    The key must be dotted path to its location in the settings file.
    """
    settings_path = ctx.obj["settings_path"]
    if not os.path.isfile(settings_path):
        ctx.exit(1)

    settings = load_settings(settings_path) or {}

    located = locate_settings_entry(settings, key)
    if not located:
        ctx.exit(1)
    parent, _, key_tail, index = located

    # Decode the raw CLI string as JSON so it keeps its natural type.
    new_value = json.loads(value)
    if key_tail is not None:
        parent[key_tail] = new_value
    elif index is not None:
        parent[index] = new_value

    save_settings(settings, settings_path)
@settings.command("remove")
@click.argument("key", nargs=1)
@click.pass_context
def remove_settings_value(ctx: click.Context, key: str):
    """
    Remove a settings key and its children.
    The key must be dotted path to its location in the settings file.
    """
    if not os.path.isfile(ctx.obj["settings_path"]):
        ctx.exit(1)

    settings = load_settings(ctx.obj["settings_path"]) or {}

    item = locate_settings_entry(settings, key)
    if not item:
        ctx.exit(1)
    parent, entry, key_tail, index = item

    if key_tail is not None:
        parent.pop(key_tail, None)
    elif index is not None:
        # BUG FIX: list.remove() deletes the first element *equal* to the
        # target, which removes the wrong item when the list contains
        # duplicate values. Delete by position instead.
        del parent[index]

    save_settings(settings, ctx.obj["settings_path"])
@settings.command("get")
@click.option(
    "--format",
    "fmt",
    default="yaml",
    show_default=False,
    help="Output format.",
    type=click.Choice(["string", "json", "yaml"]),
)
@click.argument("key", nargs=1)
@click.pass_context
def get_settings_value(ctx: click.Context, key: str, fmt: str = "json"):
    """
    Show a settings value.
    The key must be dotted path to its location in the settings file.
    """
    settings_path = ctx.obj["settings_path"]
    if not os.path.isfile(settings_path):
        ctx.exit(1)

    settings = load_settings(settings_path) or {}

    located = locate_settings_entry(settings, key)
    if not located:
        ctx.exit(1)
    parent, entry, key_tail, index = located

    # Render the located entry in the requested format.
    if fmt == "string":
        click.echo(str(entry))
    elif fmt == "json":
        click.echo(json.dumps(entry, indent=2))
    elif fmt == "yaml":
        click.echo(yaml.dump(entry, indent=2))
@cli.command()
@click.argument(
    "target", type=click.Choice(["core", "settings", "extensions"]), metavar="TARGET"
)
@click.pass_context
def info(ctx: click.Context, target: str):
    """Display information about the Chaos Toolkit environment.
    Available targets are:
    * core: display the information about your version of the Chaos Toolkit
    * extensions: display the list of installed extensions and plugins
    * settings: display your current full settings
    """
    # Defensive check for direct (non-CLI) calls; click.Choice already
    # enforces this for command-line invocations.
    if target not in ("core", "settings", "extensions"):
        raise click.BadArgumentUsage("Invalid target")

    if target == "core":
        row = "{:<20}{:<10}"
        click.secho(row.format("NAME", "VERSION"), fg="bright_blue")
        click.echo(row.format("CLI", __version__))
        click.echo(row.format("Core library", chaoslib_version))
        return

    if target == "extensions":
        row = "{:<40}{:<10}{:30}{:50}"
        click.secho(
            row.format("NAME", "VERSION", "LICENSE", "DESCRIPTION"), fg="bright_blue"
        )
        for extension in list_extensions():
            summary = extension.summary.replace("Chaos Toolkit Extension for ", "")[:50]
            click.echo(
                row.format(
                    extension.name, extension.version, extension.license, summary
                )
            )
        return

    # target == "settings": dump the raw file contents, if present.
    settings_path = ctx.obj["settings_path"]
    if not os.path.isfile(settings_path):
        click.echo(f"No settings file found at {settings_path}")
        return
    with open(settings_path) as f:
        click.echo(f.read())
@cli.command()
@click.option(
    "--no-system-info", is_flag=True, help="Do not discover system information."
)
@click.option(
    "--no-install", is_flag=True, help="Assume package already in PYTHONPATH."
)
@click.option(
    "--discovery-path",
    default="./discovery.json",
    # FIX: removed the duplicated word ("the the") from the help text.
    help="Path where to save the discovery outcome.",
    show_default=True,
)
@click.argument("package")
@click.pass_context
def discover(
    ctx: click.Context,
    package: str,
    discovery_path: str = "./discovery.json",
    no_system_info: bool = False,
    no_install: bool = False,
) -> Discovery:
    """Discover capabilities and experiments."""
    settings = load_settings(ctx.obj["settings_path"])

    try:
        notify(settings, DiscoverFlowEvent.DiscoverStarted, package)
        discovery = disco(
            package_name=package,
            discover_system=not no_system_info,
            download_and_install=not no_install,
        )
    except DiscoveryFailed as err:
        # Report the failure and bail out without writing any file.
        notify(settings, DiscoverFlowEvent.DiscoverFailed, package, err)
        logger.debug(f"Failed to discover {package}", exc_info=err)
        logger.fatal(str(err))
        return

    with open(discovery_path, "w") as d:
        d.write(json.dumps(discovery, indent=2, default=encoder))
    logger.info(f"Discovery outcome saved in {discovery_path}")

    notify(settings, DiscoverFlowEvent.DiscoverCompleted, discovery)
    return discovery
@cli.command()  # noqa: C901
@click.option(
    "--discovery-path",
    default="./discovery.json",
    help="Path to the discovery outcome.",
    show_default=True,
    type=click.Path(exists=False),
)
@click.option(
    "--experiment-path",
    default="./experiment.json",
    type=click.Path(exists=False),
    help="Path where to save the experiment (.yaml or .json)",
    show_default=True,
)
@click.pass_context
def init(
    ctx: click.Context,
    discovery_path: str = "./discovery.json",  # noqa: C901
    experiment_path: str = "./experiment.json",
) -> Experiment:
    """Initialize a new experiment from discovered capabilities."""
    settings = load_settings(ctx.obj["settings_path"])
    notify(settings, InitFlowEvent.InitStarted)
    click.secho(
        "You are about to create an experiment.\n"
        "This wizard will walk you through each step so that you can build\n"
        "the best experiment for your needs.\n"
        "\n"
        "An experiment is made up of three elements:\n"
        "- a steady-state hypothesis [OPTIONAL]\n"
        "- an experimental method\n"
        "- a set of rollback activities [OPTIONAL]\n"
        "\n"
        "Only the method is required. Also your experiment will\n"
        "not run unless you define at least one activity (probe or action)\n"
        "within it",
        fg="blue",
    )

    # Without a discovery file the wizard can only build a bare skeleton.
    discovery = None
    if discovery_path and os.path.exists(discovery_path):
        with open(discovery_path) as d:
            discovery = json.loads(d.read())
    else:
        click.echo("No discovery was found, let's create an empty experiment")

    base_experiment = {"title": "", "description": "N/A", "tags": []}

    s = click.style

    title = click.prompt(s("Experiment's title", fg="green"), type=str)
    base_experiment["title"] = title

    # NOTE(review): "stead state" below is a typo ("steady state") in
    # user-facing output — left untouched here since it is a runtime string.
    click.secho(
        "\nA steady state hypothesis defines what 'normality' "
        "looks like in your system\n"
        "The steady state hypothesis is a collection of "
        "conditions that are used,\n"
        "at the beginning of an experiment, to decide if the "
        "system is in a recognised\n"
        "'normal' state. The steady state conditions are then "
        "used again when your experiment\n"
        " is complete to detect where your system may have "
        "deviated in an interesting,\n"
        "weakness-detecting way\n"
        "\n"
        "Initially you may not know what your steady state "
        "hypothesis is\n"
        "and so instead you might create an experiment "
        "without one\n"
        "This is why the stead state hypothesis is optional.",
        fg="blue",
    )
    m = s("Do you want to define a steady state hypothesis now?", dim=True)
    if click.confirm(m):
        hypo = {}
        title = click.prompt(s("Hypothesis's title", fg="green"), type=str)
        hypo["title"] = title
        hypo["probes"] = []

        # Only probes may serve as steady-state conditions.
        if discovery:
            activities = []
            for a in discovery["activities"]:
                if a["type"] == "probe":
                    activities.append((a["name"], a))
            click.secho(
                "\nYou may now define probes that will determine\n"
                "the steady-state of your system.",
                fg="blue",
            )
            add_activities(activities, hypo["probes"], with_tolerance=True)
        base_experiment["steady-state-hypothesis"] = hypo

    # The method and rollbacks can only be filled from discovered activities.
    if discovery:
        base_experiment["method"] = []
        click.secho(
            "\nAn experiment's method contains actions "
            "and probes. Actions\n"
            "vary real-world events in your system to determine if your\n"
            "steady-state hypothesis is maintained when those events occur.\n"
            "\n"
            "An experimental method can also contain probes to gather"
            " additional\n"
            "information about your system as your method is executed.",
            fg="blue",
        )
        m = s("Do you want to define an experimental method?", dim=True)
        if click.confirm(m):
            activities = [(a["name"], a) for a in discovery["activities"]]
            add_activities(activities, base_experiment["method"])

        click.secho(
            "\nAn experiment may optionally define a set of remedial"
            " actions\nthat are used to rollback the system to a given"
            " state.",
            fg="blue",
        )
        m = s("Do you want to add some rollbacks now?", dim=True)
        if click.confirm(m):
            rollbacks = []
            # Only actions make sense as rollbacks.
            activities = []
            for a in discovery["activities"]:
                if a["type"] == "action":
                    activities.append((a["name"], a))
            add_activities(activities, rollbacks)
            base_experiment["rollbacks"] = rollbacks

    # Serialize according to the requested file extension.
    if is_yaml(experiment_path):
        output = yaml.dump(
            base_experiment, indent=4, default_flow_style=False, sort_keys=False
        )
    else:
        output = json.dumps(base_experiment, indent=4, default=encoder)
    with open(experiment_path, "w") as e:
        e.write(output)

    click.echo(f"\nExperiment created and saved in '{experiment_path}'")
    notify(settings, InitFlowEvent.InitCompleted, base_experiment)
    return base_experiment
# keep this after the cli group declaration for plugins to override defaults
try:
    # importlib.metadata on Python >= 3.10 (and importlib_metadata >= 3.6)
    # supports selecting entry points by group directly.
    _cli_plugins = importlib_metadata.entry_points(group="chaostoolkit.cli_plugins")
except TypeError:
    # FIX: older interfaces return a dict keyed by group; the previous
    # unconditional `.get(...)` call breaks on Python 3.12 where that
    # return type (and `.get`) was removed.
    _cli_plugins = importlib_metadata.entry_points().get("chaostoolkit.cli_plugins", [])
with_plugins(_cli_plugins)(cli)
def is_yaml(experiment_path: str) -> bool:
    """Return True when the path's extension denotes a YAML file."""
    extension = os.path.splitext(experiment_path)[1]
    return extension.lower() in {".yaml", ".yml"}
def add_activities(
    activities: List[Activity],
    pool: List[Activity],  # noqa: C901
    with_tolerance: bool = False,
):
    """
    Add activities to the given pool.

    Interactively prompts the user to pick one of the discovered
    `activities`, fill in its arguments and append the result to `pool`.
    Recurses while the user wants to add more. When `with_tolerance` is
    True (steady-state probes), a tolerance value is requested as well.
    """
    base_activity = {
        "type": None,
        "name": None,
        "provider": {"type": "python", "module": None, "func": None, "arguments": {}},
    }
    s = click.style

    # Page long activity lists instead of flooding the terminal.
    echo = click.echo
    if len(activities) > 20:
        echo = click.echo_via_pager

    click.echo(s("Add an activity", fg="green"))
    echo(
        "\n".join([f"{idx + 1}) {name}" for (idx, (name, a)) in enumerate(activities)])
    )
    activity_index = click.prompt(s("Activity (0 to escape)", fg="green"), type=int)
    if not activity_index:
        return
    activity_index = activity_index - 1

    # BUG FIX: the previous bound check used `> len(activities)`, letting an
    # index equal to the list length slip through (IndexError) and letting
    # negative inputs silently pick from the end of the list.
    if not 0 <= activity_index < len(activities):
        click.secho("Please pick up a valid activity", fg="red", err=True)
        add_activities(activities, pool)
        return

    selected = activities[activity_index][1]
    selected_doc = selected.get("doc")
    if selected_doc:
        click.secho(f"\n{selected_doc}", fg="blue")

    m = s("Do you want to use this {a}?".format(a=selected["type"]), dim=True)
    if not click.confirm(m):
        m = s("Do you want to select another activity?", dim=True)
        if not click.confirm(m):
            return
        add_activities(activities, pool)
        # BUG FIX: without this return, the declined activity was still
        # configured and appended to the pool after the recursion above.
        return

    # Shallow copy is safe: `provider` is replaced wholesale below.
    activity = base_activity.copy()
    activity["name"] = selected["name"]
    activity["type"] = selected["type"]

    if with_tolerance:
        click.secho(
            "\nA steady-state probe requires a tolerance value, "
            "within which\n"
            "your system is in a recognised `normal` state.\n",
            fg="blue",
        )
        tolerance_value = click.prompt(
            s("What is the tolerance for this probe?", fg="green")
        )
        activity["tolerance"] = tolerance_value

    activity["provider"] = {"type": "python"}
    activity["provider"]["module"] = selected["mod"]
    activity["provider"]["func"] = selected["name"]
    activity["provider"]["arguments"] = {}

    click.secho(
        "\nYou now need to fill the arguments for this activity. Default\n"
        "values will be shown between brackets. You may simply press return\n"
        "to use it or not set any value.",
        fg="blue",
    )
    for arg in selected.get("arguments", []):
        arg_name = arg["name"]
        # secrets/configuration are injected by the runtime, not the user.
        if arg_name in ("secrets", "configuration"):
            continue

        # None is a bit of a problem because for the prompt it means
        # no defaults. When the user doesn't want to set a value, then
        # the prompt keeps asking. So, we pretend the default for None
        # is actually the empty string.
        arg_default = None
        if "default" in arg:
            arg_default = arg["default"]
            if arg_default is None:
                arg_default = ""
        arg_type = portable_type_name_to_python_type(arg["type"])
        question = f"Argument's value for '{arg_name}'"
        m = s(question, fg="yellow")
        arg_value = click.prompt(
            m, default=arg_default, show_default=True, type=arg_type
        )

        # now, if the user didn't input anything and the default was
        # None, we override it back to None
        if "default" in arg:
            arg_default = arg["default"]
            if arg_default is None and arg_value == "":
                arg_value = None
        activity["provider"]["arguments"][arg["name"]] = arg_value

    pool.append(activity)

    m = s("Do you want to select another activity?", dim=True)
    if not click.confirm(m):
        return
    add_activities(activities, pool)
| |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE.
# Contact: khmer-project@idyll.org
#
import os
import shutil
import threading
import io
import screed
import khmer
from . import khmer_tst_utils as utils
from nose.plugins.attrib import attr
from .test_scripts import _make_counting
def test_normalize_by_median_indent():
    # Regression test: the script should run cleanly when given a preloaded
    # counting table (--loadtable) and an explicit output file (-o).
    infile = utils.get_test_data('paired-mixed.fa.pe')
    hashfile = utils.get_test_data('normC20k20.ct')
    outfile = utils.get_temp_filename('paired-mixed.fa.pe.keep')
    script = 'normalize-by-median.py'
    args = ['--loadtable', hashfile, '-o', outfile, infile]
    (status, out, err) = utils.runscript(script, args)
    assert status == 0, (out, err)
    assert os.path.exists(outfile)
def test_normalize_by_median():
    """Basic digital normalization at C=1 keeps a single representative read."""
    cutoff = '1'

    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)

    script = 'normalize-by-median.py'
    args = ['-C', cutoff, '-k', '17', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 98' in err, err

    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile

    kept = [record.sequence for record in screed.open(outfile)]
    assert len(kept) == 1, kept
    assert kept[0].startswith('GGTTGACGGGGCTCAGGGGG'), kept
    assert "IOErrors" not in err
def test_normalize_by_median_unpaired_final_read():
    """Requiring pairs (-p) on unpaired input must abort with an error."""
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('single-read.fq'), infile)

    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', '-p', infile]
    try:
        (status, out, err) = utils.runscript(script, args, in_dir)
        raise Exception("Shouldn't get to this")
    except AssertionError as e:
        out = str(e)
        assert "ERROR: Unpaired reads when require_paired" in out, out
def test_normalize_by_median_unforced_badfile():
    """A missing input file must fail and leave no .keep output behind."""
    infile = utils.get_temp_filename("potatoes")
    outfile = infile + '.keep'
    in_dir = os.path.dirname(infile)

    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', infile]
    try:
        (status, out, err) = utils.runscript(script, args, in_dir)
        raise Exception("Shouldn't get to this")
    except AssertionError as e:
        out = str(e)
        assert "ERROR: [Errno 2] No such file or directory:" in out, out

    assert not os.path.exists(outfile), '.keep file should have been removed: '
def test_normalize_by_median_contradictory_args():
    """--force-single and -p are mutually exclusive and must be rejected."""
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    report_file = utils.get_temp_filename('report.out')
    shutil.copyfile(utils.get_test_data('test-large.fa'), infile)

    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', '--force-single', '-p', '-R',
            report_file, infile]
    try:
        (status, out, err) = utils.runscript(script, args, in_dir)
        raise Exception("Shouldn't get to this")
    except AssertionError as e:
        out = str(e)
        assert "cannot both be set" in out, out
def test_normalize_by_median_stdout_3():
    # '--out -' should stream the kept reads to stdout (/dev/stdout).
    CUTOFF = '1'
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '17', infile, '--out', '-']
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert 'Total number of unique k-mers: 98' in err, err
    assert 'in /dev/stdout' in err, err
    assert "IOErrors" not in err
@attr('known_failing')
def test_normalize_by_median_known_good():
    # Compares the kept read names against a previously validated output.
    # NOTE(review): zip() stops at the shorter stream, so a truncated
    # output file would pass unnoticed — confirm whether a length check
    # is wanted here.
    CUTOFF = '2'
    infile = utils.get_temp_filename('test.fa.gz')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('100k-filtered.fa.gz'), infile)
    script = 'normalize-by-median.py'
    args = ['-C', CUTOFF, '-k', '20', '-x', '4e6', infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    iter_known = screed.open(utils.get_test_data('100k-filtered.fa.keep.gz'))
    iter_out = screed.open(outfile)
    try:
        for rknown, rout in zip(iter_known, iter_out):
            assert rknown.name == rout.name
    except Exception as e:
        print(e)
        assert False
def test_normalize_by_median_report_fp():
    """-R must write a progress report whose first line matches expectations."""
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    outfile = utils.get_temp_filename('report.out')
    shutil.copyfile(utils.get_test_data('test-large.fa'), infile)

    script = 'normalize-by-median.py'
    args = ['-C', '1', '-k', '17', '-R', outfile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert "fp rate estimated to be 0.623" in err, err

    # FIX: close the report file deterministically instead of leaking the
    # handle until interpreter shutdown.
    with open(outfile, 'r') as report:
        line = report.readline()
    assert "100000 25261 0.25261" in line, line
def test_normalize_by_median_unpaired_and_paired():
    # Mix a paired input file with an extra unpaired file (-u) and make
    # sure both contribute k-mers and a .keep file is produced.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-abund-read-paired.fa'), source)

    extra = utils.get_temp_filename('test1.fa', tempdir=work_dir)
    shutil.copyfile(utils.get_test_data('random-20-a.fa'), extra)

    cmd_args = ['-C', '1', '-k', '17', '-u', extra, '-p', source]
    _, _, err = utils.runscript('normalize-by-median.py', cmd_args, work_dir)

    assert 'Total number of unique k-mers: 4030' in err, err
    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile
def test_normalize_by_median_count_kmers_PE():
    # The input holds one read pair identical except for the final base.
    # In --force-single mode the second read is discarded; in paired (-p)
    # mode it is kept, contributing exactly one extra unique k-mer.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('paired_one.base.dif.fa'), source)
    script = 'normalize-by-median.py'

    _, _, err = utils.runscript(
        script, ['-C', '1', '-k', '17', '--force-single', source], work_dir)
    assert 'Total number of unique k-mers: 98' in err, err
    assert 'kept 1 of 2 or 50%' in err, err

    _, _, err = utils.runscript(
        script, ['-C', '1', '-k', '17', '-p', source], work_dir)
    assert 'Total number of unique k-mers: 99' in err, err
    assert 'kept 2 of 2 or 100%' in err, err
def test_normalize_by_median_double_file_name():
    """Passing the same filename twice must be rejected by the script.

    Fix: previously the test passed vacuously when runscript raised no
    exception; now it fails explicitly in that case, matching the
    try/raise pattern used elsewhere in this file.
    """
    infile = utils.get_temp_filename('test-abund-read-2.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    script = 'normalize-by-median.py'
    args = [utils.get_test_data('test-abund-read-2.fa'), infile]
    try:
        (status, out, err) = utils.runscript(script, args, in_dir)
        raise Exception("Shouldn't get to this")
    except AssertionError as e:
        assert "Duplicate filename--Cannot handle this!" in str(e), str(e)
def test_normalize_by_median_overwrite():
    # An existing .keep file passed via -o must be overwritten, not
    # appended to: only the new input's single sequence should remain.
    target = utils.get_temp_filename('test.fa.keep')
    shutil.copyfile(utils.get_test_data('test-abund-read.fa'), target)
    work_dir = os.path.dirname(target)

    source = utils.get_temp_filename('test.fa', work_dir)
    shutil.copyfile(utils.get_test_data('test-abund-read-3.fa'), source)

    cmd_args = ['-C', '1', '-k', '17', '-o', target, source]
    utils.runscript('normalize-by-median.py', cmd_args, work_dir)

    assert os.path.exists(target), target
    seqs = [record.sequence for record in screed.open(target)]
    assert len(seqs) == 1, seqs
    assert 'GACAGCgtgCCGCA' in seqs[0], seqs
def test_normalize_by_median_version():
    # '--version' reports the khmer version on stderr; skip banner ('||')
    # and blank lines and check the first informative line.
    status, out, err = utils.runscript('normalize-by-median.py', ['--version'])
    lines = err.splitlines()
    print(lines)
    version_line = ''
    for candidate in lines:
        if candidate.startswith('||') or not candidate.strip():
            continue
        version_line = candidate
        break
    print(version_line)
    assert version_line.startswith('khmer ')
def test_normalize_by_median_2():
    # With C=2 the extra copies of the abundant read are dropped and the
    # two survivors keep their original sequences.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), source)

    utils.runscript('normalize-by-median.py',
                    ['-C', '2', '-k', '17', source], work_dir)

    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile
    seqs = [record.sequence for record in screed.open(keepfile)]
    assert len(seqs) == 2, seqs
    assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
    assert seqs[1] == 'GGTTGACGGGGCTCAGGG', seqs
def test_normalize_by_median_paired():
    # Paired mode at C=1: the surviving pair keeps both mates.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-abund-read-paired.fa'), source)

    utils.runscript('normalize-by-median.py',
                    ['-C', '1', '-p', '-k', '17', source], work_dir)

    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile
    seqs = [record.sequence for record in screed.open(keepfile)]
    assert len(seqs) == 2, seqs
    assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
    assert seqs[1].startswith('GGTTGACGGGGCTCAGGG'), seqs
def test_normalize_by_median_paired_fq():
    # Paired FASTQ input: all six reads survive at C=20 and their names
    # (including the '1::FOO' / '2::FOO' suffixes) are preserved verbatim.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-abund-read-paired.fq'), source)

    cmd_args = ['-C', '20', '-p', '-k', '17', source]
    _, out, err = utils.runscript('normalize-by-median.py', cmd_args, work_dir)
    print(out)
    print(err)

    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile

    seqs = [record.sequence for record in screed.open(keepfile)]
    assert len(seqs) == 6, len(seqs)
    assert seqs[0].startswith('GGTTGACGGGGCTCAGGGGG'), seqs
    assert seqs[1].startswith('GGTTGACGGGGCTCAGGG'), seqs

    names = [record.name
             for record in screed.open(keepfile, parse_description=False)]
    assert len(names) == 6, names
    assert '895:1:37:17593:9954 1::FOO' in names, names
    assert '895:1:37:17593:9954 2::FOO' in names, names
def test_normalize_by_median_impaired():
    # Input that claims to be paired but is broken should make the
    # script abort with an unpaired-reads error.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-abund-read-impaired.fa'), source)

    cmd_args = ['-C', '1', '-p', '-k', '17', source]
    _, _, err = utils.runscript('normalize-by-median.py', cmd_args, work_dir,
                                fail_ok=True)
    assert 'ERROR: Unpaired reads ' in err, err
def test_normalize_by_median_force():
    # With --force (-f), a corrupt input file is skipped with an IOError
    # note and processing continues with the good file.
    bad = utils.get_temp_filename('test-corrupt.fq')
    good = utils.get_temp_filename('test-good.fq',
                                   tempdir=os.path.dirname(bad))
    work_dir = os.path.dirname(good)
    shutil.copyfile(utils.get_test_data('test-error-reads.fq'), bad)
    shutil.copyfile(utils.get_test_data('test-fastq-reads.fq'), good)

    cmd_args = ['-f', '-C', '1', '-k', '17', bad, good]
    _, _, err = utils.runscript('normalize-by-median.py', cmd_args, work_dir)

    assert '*** Skipping' in err
    assert '** IOErrors' in err
def test_normalize_by_median_no_bigcount():
    """Counts in the table saved via --savetable must cap at 255.

    Fixes: drop the unused `counting_ht` and `outfile` locals, and pass
    '-k' and its value as separate argv entries, consistent with every
    other test in this file.
    """
    infile = utils.get_temp_filename('test.fa')
    hashfile = utils.get_temp_filename('test-out.ct')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('test-abund-read-2.fa'), infile)
    # call kept for its side effects; the returned table was never used
    _make_counting(infile, K=8)
    script = 'normalize-by-median.py'
    args = ['-C', '1000', '-k', '8', '--savetable', hashfile, infile]
    (status, out, err) = utils.runscript(script, args, in_dir)
    assert status == 0, (out, err)
    print((out, err))
    assert os.path.exists(hashfile), hashfile
    kh = khmer.load_counting_hash(hashfile)
    # 255 is the saturation value of the 8-bit counters (no bigcount)
    assert kh.get('GGTTGACG') == 255
def test_normalize_by_median_empty():
    # An empty input file should still produce an (empty) .keep file.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-empty.fa'), source)

    utils.runscript('normalize-by-median.py',
                    ['-C', '1', '-k', '17', source], work_dir)

    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile
def test_normalize_by_median_emptycountingtable():
    # --loadtable pointed at a non-table file must fail with ValueError.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-empty.fa'), source)

    cmd_args = ['-C', '1', '--loadtable', source, source]
    status, out, err = utils.runscript('normalize-by-median.py', cmd_args,
                                       work_dir, fail_ok=True)
    assert 'ValueError' in err, (status, out, err)
def test_normalize_by_median_fpr():
    # A tiny table (-x 12) forces an excessive false-positive rate; the
    # script should complain that the graph structure is too small.
    max_tablesize = 12
    source = utils.get_temp_filename('test-fpr.fq')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('test-fastq-reads.fq'), source)

    cmd_args = ['-f', '-k 17', '-x ' + str(max_tablesize), source]
    _, out, err = utils.runscript('normalize-by-median.py', cmd_args,
                                  work_dir, fail_ok=True)
    print(out)
    print(err)

    assert os.path.exists(source + '.keep'), source
    assert '** ERROR: the graph structure is too small' in err, err
def write_by_chunks(infile, outfile, CHUNKSIZE=8192):
    """Copy *infile* to *outfile* in CHUNKSIZE-byte binary chunks.

    Fix: the original opened both files without context managers, so
    they leaked if a read or write raised; `with` closes them on every
    exit path.

    :param infile: path of the file to read (opened in binary mode).
    :param outfile: path of the file to write (truncated/created).
    :param CHUNKSIZE: bytes to transfer per read (default 8192).
    """
    with io.open(infile, 'rb') as ifile, io.open(outfile, 'wb') as ofile:
        while True:
            chunk = ifile.read(CHUNKSIZE)
            if not chunk:
                break
            ofile.write(chunk)
def test_normalize_by_median_streaming():
    # Write diginorm output to a fifo and drain it from a helper thread,
    # simulating a downstream streaming consumer; 100 kept FASTQ reads
    # means 400 output lines.
    source = utils.get_test_data('100-reads.fq.gz')
    work_dir = os.path.dirname(source)

    fifo = utils.get_temp_filename('fifo')
    sink = utils.get_temp_filename('outfile')
    os.mkfifo(fifo)
    drainer = threading.Thread(target=write_by_chunks, args=(fifo, sink))
    drainer.start()

    cmd_args = ['-C', '20', '-k', '17', '-o', fifo, source]
    utils.runscript('normalize-by-median.py', cmd_args, work_dir)
    drainer.join()

    assert os.path.exists(sink), sink
    with open(sink) as fp:
        linecount = sum(1 for _ in fp)
    assert linecount == 400
def test_diginorm_basic_functionality_1():
    # Each pair holds a multicopy sequence ('ACTTCA...') plus a random
    # one; with C=1 and -p every read should be kept.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('dn-test-all-paired-all-keep.fa'),
                    source)

    cmd_args = ['-C', '1', '-p', '-k', '15', source]
    _, out, err = utils.runscript('normalize-by-median.py', cmd_args, work_dir)
    print(out)
    print(err)

    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile
    names = {record.name for record in screed.open(keepfile)}
    assert names == {'a/1', 'a/2',
                     'b/1', 'b/2',
                     'c/1', 'c/2',
                     'd/1', 'd/2'}, names
def test_diginorm_basic_functionality_2():
    # Each pair holds a multicopy sequence ('ACTTCA...') and a random
    # one ('G...'). With C=1 and --force-single, only the random reads
    # survive, plus a single copy of the multicopy sequence.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('dn-test-all-paired-all-keep.fa'),
                    source)

    cmd_args = ['-C', '1', '--force-single', '-k', '15', source]
    _, out, err = utils.runscript('normalize-by-median.py', cmd_args, work_dir)
    print(out)
    print(err)

    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile
    names = {record.name for record in screed.open(keepfile)}
    assert names == {'a/1', 'a/2',
                     'b/2',
                     'c/1',
                     'd/2'}, names
def test_diginorm_basic_functionality_3():
    # Entirely unpaired data with one duplicate ('A...') and a random
    # sequence ('G...'). With C=1 only three reads should remain, with
    # no other complaints.
    source = utils.get_temp_filename('test.fa')
    work_dir = os.path.dirname(source)
    shutil.copyfile(utils.get_test_data('dn-test-none-paired.fa'),
                    source)

    cmd_args = ['-C', '1', '-k', '15', source]
    _, out, err = utils.runscript('normalize-by-median.py', cmd_args, work_dir)
    print(out)
    print(err)

    keepfile = source + '.keep'
    assert os.path.exists(keepfile), keepfile
    names = {record.name for record in screed.open(keepfile)}
    assert names == {'a/1',
                     'b/2',
                     'd/1'}, names
def test_diginorm_basic_functionality_4():
    # This data is mixed paired/unpaired, but with one duplicate ('A...')
    # and a random sequence ('G...'). With C=1 and -p, the six reads
    # asserted below are expected to survive.
    # NOTE(review): the original comment claimed "With 'C=2' all of the
    # sequences should be kept", which contradicts CUTOFF = '1' below --
    # confirm which cutoff was intended.
    CUTOFF = ['-C', '1']
    PAIRING = ['-p']
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('dn-test-some-paired-all-keep.fa'),
                    infile)
    script = 'normalize-by-median.py'
    args = list(CUTOFF) + list(PAIRING) + ['-k', '15', infile]
    _, out, err = utils.runscript(script, args, in_dir)
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    seqs = set([r.name for r in screed.open(outfile)])
    assert seqs == set(['a/1', 'a/2',
                        'b/2',
                        'c/1', 'c/2',
                        'd/2']), seqs
def test_diginorm_basic_functionality_5():
    """All-paired data at C=1 with -p: every read should be kept.

    Fix: this was a second `test_diginorm_basic_functionality_4`, which
    shadowed the earlier definition so that test never ran; renamed to
    `_5` so both are collected.
    """
    # each of these pairs has both a multicopy sequence ('ACTTCA...') and
    # a random sequence; with C=1 and -p, all should be kept.
    CUTOFF = ['-C', '1']
    PAIRING = ['-p']
    infile = utils.get_temp_filename('test.fa')
    in_dir = os.path.dirname(infile)
    shutil.copyfile(utils.get_test_data('dn-test-all-paired-all-keep.fa'),
                    infile)
    script = 'normalize-by-median.py'
    args = list(CUTOFF) + list(PAIRING) + ['-k', '15', infile]
    _, out, err = utils.runscript(script, args, in_dir)
    print(out)
    print(err)
    outfile = infile + '.keep'
    assert os.path.exists(outfile), outfile
    seqs = set([r.name for r in screed.open(outfile)])
    assert seqs == set(['a/1', 'a/2',
                        'b/1', 'b/2',
                        'c/1', 'c/2',
                        'd/1', 'd/2']), seqs
| |
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pysensu_yelp
import pytest
from paasta_tools import check_flink_services_health
from paasta_tools import check_services_replication_tools
from paasta_tools.check_flink_services_health import check_under_registered_taskmanagers
from paasta_tools.utils import compose_job_id
# Replace the module-level loggers with mocks so the tests below don't
# emit log output (and so log calls could be asserted if needed).
check_flink_services_health.log = mock.Mock()
check_services_replication_tools.log = mock.Mock()
@pytest.fixture
def instance_config():
    """Mock Flink instance config for fake_service.fake_instance."""
    service, instance = "fake_service", "fake_instance"
    job_id = compose_job_id(service, instance)
    config = mock.Mock(
        service=service,
        instance=instance,
        cluster="fake_cluster",
        soa_dir="fake_soa_dir",
        job_id=job_id,
        config_dict={},
    )
    config.get_replication_crit_percentage.return_value = 100
    config.get_registrations.return_value = [job_id]
    return config
@mock.patch(
    "paasta_tools.flink_tools.get_flink_jobmanager_overview",
    autospec=True,
    return_value={"taskmanagers": 3},
)
def test_check_under_registered_taskmanagers_ok(mock_overview, instance_config):
    # All 3 expected taskmanagers are registered: not under-registered.
    is_under, output, description = check_under_registered_taskmanagers(
        instance_config, expected_count=3, cr_name="fake--service-575c857546"
    )
    assert not is_under
    expected_msg = (
        "fake_service.fake_instance has 3/3 taskmanagers "
        "reported by dashboard (threshold: 100%)"
    )
    assert expected_msg in output
    assert "fake_service.fake_instance taskmanager is available" in description
@mock.patch(
    "paasta_tools.flink_tools.get_flink_jobmanager_overview",
    autospec=True,
    return_value={"taskmanagers": 2},
)
def test_check_under_registered_taskmanagers_under(mock_overview, instance_config):
    # Only 2 of 3 expected taskmanagers registered: flagged as under.
    is_under, output, description = check_under_registered_taskmanagers(
        instance_config, expected_count=3, cr_name="fake--service-575c857546"
    )
    assert is_under
    expected_msg = (
        "fake_service.fake_instance has 2/3 taskmanagers "
        "reported by dashboard (threshold: 100%)"
    )
    assert expected_msg in output
    assert (
        "paasta status -s fake_service -i fake_instance -c fake_cluster -vv"
        in description
    )
@mock.patch(
    "paasta_tools.flink_tools.get_flink_jobmanager_overview",
    autospec=True,
    side_effect=ValueError("dummy exception"),
)
def test_check_under_registered_taskmanagers_error(mock_overview, instance_config):
    # A failing dashboard lookup should be reported as under-registered.
    is_under, output, description = check_under_registered_taskmanagers(
        instance_config, expected_count=3, cr_name="fake--service-575c857546"
    )
    assert is_under
    expected_msg = (
        "Dashboard of service fake_service.fake_instance is not available "
        "(dummy exception)"
    )
    assert expected_msg in output
    assert (
        "paasta status -s fake_service -i fake_instance -c fake_cluster -vv"
        in description
    )
def test_check_flink_service_health_healthy(instance_config):
    """All sub-component checks pass -> a single OK Sensu event is sent."""
    all_pods = []
    with mock.patch(
        "paasta_tools.check_flink_services_health.healthy_flink_containers_cnt",
        autospec=True,
        return_value=1,
    ), mock.patch(
        "paasta_tools.check_flink_services_health.check_under_replication",
        autospec=True,
        return_value=(False, "OK", "check_rep"),
    ) as mock_check_under_replication, mock.patch(
        "paasta_tools.check_flink_services_health.check_under_registered_taskmanagers",
        autospec=True,
        return_value=(False, "OK", "check_task"),
    ) as mock_check_under_registered_taskmanagers, mock.patch(
        "paasta_tools.check_flink_services_health.send_replication_event", autospec=True
    ) as mock_send_replication_event:
        instance_config.config_dict["taskmanager"] = {"instances": 3}
        check_flink_services_health.check_flink_service_health(
            instance_config=instance_config,
            all_tasks_or_pods=all_pods,
            replication_checker=None,
            dry_run=True,
        )
        # check_under_replication is expected once per sub-component, in
        # order: supervisor, jobmanager, taskmanager.
        expected = [
            mock.call(
                instance_config=instance_config,
                expected_count=1,
                num_available=1,
                sub_component="supervisor",
            ),
            mock.call(
                instance_config=instance_config,
                expected_count=1,
                num_available=1,
                sub_component="jobmanager",
            ),
            mock.call(
                instance_config=instance_config,
                expected_count=3,
                num_available=1,
                sub_component="taskmanager",
            ),
        ]
        mock_check_under_replication.assert_has_calls(expected)
        mock_check_under_registered_taskmanagers.assert_called_once_with(
            instance_config=instance_config, expected_count=3, cr_name=""
        )
        # Four OK results (three replication checks + the taskmanager
        # registration check) are folded into one OK event.
        mock_send_replication_event.assert_called_once_with(
            instance_config=instance_config,
            status=pysensu_yelp.Status.OK,
            output="OK, OK, OK, OK",
            description="check_rep\n########\ncheck_rep\n########\ncheck_rep\n########\ncheck_task",
            dry_run=True,
        )
def test_check_flink_service_health_too_few_taskmanagers(instance_config):
    """Taskmanager replication and registration both fail -> CRITICAL event."""
    def check_under_replication_side_effect(*args, **kwargs):
        # supervisor and jobmanager pass; taskmanager is under-replicated
        if kwargs["sub_component"] == "supervisor":
            return False, "OK", "check_rep"
        if kwargs["sub_component"] == "jobmanager":
            return False, "OK", "check_rep"
        if kwargs["sub_component"] == "taskmanager":
            return True, "NOPE", "check_rep"
    all_pods = []
    with mock.patch(
        "paasta_tools.check_flink_services_health.healthy_flink_containers_cnt",
        autospec=True,
        return_value=1,
    ), mock.patch(
        "paasta_tools.check_flink_services_health.check_under_registered_taskmanagers",
        autospec=True,
        return_value=(True, "NOPE", "check_task"),
    ) as mock_check_under_registered_taskmanagers, mock.patch(
        "paasta_tools.check_flink_services_health.check_under_replication",
        autospec=True,
        side_effect=check_under_replication_side_effect,
    ) as mock_check_under_replication, mock.patch(
        "paasta_tools.check_flink_services_health.send_replication_event", autospec=True
    ) as mock_send_replication_event:
        instance_config.config_dict["taskmanager"] = {"instances": 3}
        check_flink_services_health.check_flink_service_health(
            instance_config=instance_config,
            all_tasks_or_pods=all_pods,
            replication_checker=None,
            dry_run=True,
        )
        # One replication check per sub-component, in order.
        expected = [
            mock.call(
                instance_config=instance_config,
                expected_count=1,
                num_available=1,
                sub_component="supervisor",
            ),
            mock.call(
                instance_config=instance_config,
                expected_count=1,
                num_available=1,
                sub_component="jobmanager",
            ),
            mock.call(
                instance_config=instance_config,
                expected_count=3,
                num_available=1,
                sub_component="taskmanager",
            ),
        ]
        mock_check_under_replication.assert_has_calls(expected)
        mock_check_under_registered_taskmanagers.assert_called_once_with(
            instance_config=instance_config, expected_count=3, cr_name=""
        )
        # The two failing checks ("NOPE") make the overall event CRITICAL.
        mock_send_replication_event.assert_called_once_with(
            instance_config=instance_config,
            status=pysensu_yelp.Status.CRITICAL,
            output="OK, OK, NOPE, NOPE",
            description="check_rep\n########\ncheck_rep\n########\ncheck_rep\n########\ncheck_task",
            dry_run=True,
        )
def test_check_flink_service_health_under_registered_taskmanagers(instance_config):
    """Replication checks pass but registration is short -> CRITICAL event.

    Fix: renamed from ..._under_registered_taskamanagers to correct the
    'taskamanagers' typo in the test name.
    """
    all_pods = []
    with mock.patch(
        "paasta_tools.check_flink_services_health.healthy_flink_containers_cnt",
        autospec=True,
        return_value=1,
    ), mock.patch(
        "paasta_tools.check_flink_services_health.check_under_replication",
        autospec=True,
        return_value=(False, "OK", "check_rep"),
    ) as mock_check_under_replication, mock.patch(
        "paasta_tools.check_flink_services_health.check_under_registered_taskmanagers",
        autospec=True,
        return_value=(True, "NOPE", "check_task"),
    ) as mock_check_under_registered_taskmanagers, mock.patch(
        "paasta_tools.check_flink_services_health.send_replication_event", autospec=True
    ) as mock_send_replication_event:
        instance_config.config_dict["taskmanager"] = {"instances": 3}
        check_flink_services_health.check_flink_service_health(
            instance_config=instance_config,
            all_tasks_or_pods=all_pods,
            replication_checker=None,
            dry_run=True,
        )
        # One replication check per sub-component, in order.
        expected = [
            mock.call(
                instance_config=instance_config,
                expected_count=1,
                num_available=1,
                sub_component="supervisor",
            ),
            mock.call(
                instance_config=instance_config,
                expected_count=1,
                num_available=1,
                sub_component="jobmanager",
            ),
            mock.call(
                instance_config=instance_config,
                expected_count=3,
                num_available=1,
                sub_component="taskmanager",
            ),
        ]
        mock_check_under_replication.assert_has_calls(expected)
        mock_check_under_registered_taskmanagers.assert_called_once_with(
            instance_config=instance_config, expected_count=3, cr_name=""
        )
        # The single failing registration check makes the event CRITICAL.
        mock_send_replication_event.assert_called_once_with(
            instance_config=instance_config,
            status=pysensu_yelp.Status.CRITICAL,
            output="OK, OK, OK, NOPE",
            description="check_rep\n########\ncheck_rep\n########\ncheck_rep\n########\ncheck_task",
            dry_run=True,
        )
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A common module for postgres like databases, such as postgres or redshift
"""
import abc
import logging
import luigi
import luigi.task
logger = logging.getLogger('luigi-interface')
class _MetadataColumnsMixin(object):
    """Provide an additional behavior that adds columns and values to tables.

    This mixin lets a task add generic metadata columns to every table it
    creates, for both PSQL and Redshift.

    Example:

        .. code:: python

            class CommonMetaColumnsBehavior(object):
                def update_report_execution_date_query(self):
                    query = "UPDATE {0} " \
                            "SET date_param = DATE '{1}' " \
                            "WHERE date_param IS NULL".format(self.table, self.date)
                    return query

                @property
                def metadata_columns(self):
                    cols = []
                    if self.date:
                        cols.append(('date_param', 'VARCHAR'))
                    return cols

                @property
                def metadata_queries(self):
                    queries = [self.update_created_tz_query()]
                    if self.date:
                        queries.append(self.update_report_execution_date_query())
                    return queries

            class RedshiftCopyCSVToTableFromS3(CommonMetaColumnsBehavior,
                                               redshift.S3CopyToTable):
                # Business-specific overrides elided for brevity.
                pass

            class UpdateTableA(RedshiftCopyCSVToTableFromS3):
                date = luigi.Parameter()
                table = 'tableA'

                def queries(self):
                    return [query_content_for('/queries/deduplicate_dupes.sql')]

            class UpdateTableB(RedshiftCopyCSVToTableFromS3):
                date = luigi.Parameter()
                table = 'tableB'
    """

    @property
    def metadata_columns(self):
        """Default metadata columns each table should carry; none by default."""
        return []

    @property
    def metadata_queries(self):
        """Queries to run after the copy step; none by default."""
        return []

    @property
    def enable_metadata_columns(self):
        """Subclasses return True to opt into metadata-column handling."""
        return False

    def _add_metadata_columns(self, connection):
        """Ensure every declared metadata column exists on the target table."""
        cursor = connection.cursor()
        for col in self.metadata_columns:
            if not len(col):
                raise ValueError(
                    "_add_metadata_columns is unable to infer column "
                    "information from column {column} for {table}".format(
                        column=col, table=self.table))
            if not self._column_exists(cursor, col[0]):
                logger.info('Adding missing metadata column {column} to {table}'.format(column=col, table=self.table))
                self._add_column_to_table(cursor, col)

    def _column_exists(self, cursor, column_name):
        """Return True if *column_name* is already present on self.table."""
        if '.' in self.table:
            # schema-qualified table name
            schema, table = self.table.split('.')
            query = ("SELECT 1 AS column_exists "
                     "FROM information_schema.columns "
                     "WHERE table_schema = LOWER('{0}') AND table_name = LOWER('{1}') AND column_name = LOWER('{2}') LIMIT 1;".format(schema, table, column_name))
        else:
            query = ("SELECT 1 AS column_exists "
                     "FROM information_schema.columns "
                     "WHERE table_name = LOWER('{0}') AND column_name = LOWER('{1}') LIMIT 1;".format(self.table, column_name))
        cursor.execute(query)
        return bool(cursor.fetchone())

    def _add_column_to_table(self, cursor, column):
        """ALTER the table to add *column*: (name,), (name, type) or (name, type, encoding)."""
        if len(column) == 1:
            raise ValueError("_add_column_to_table() column type not specified for {column}".format(column=column[0]))
        elif len(column) == 2:
            query = "ALTER TABLE {table} ADD COLUMN {column};".format(table=self.table, column=' '.join(column))
        elif len(column) == 3:
            # third element is a Redshift column encoding
            query = "ALTER TABLE {table} ADD COLUMN {column} ENCODE {encoding};".format(table=self.table, column=' '.join(column[0:2]), encoding=column[2])
        else:
            raise ValueError("_add_column_to_table() found no matching behavior for {column}".format(column=column))
        cursor.execute(query)

    def post_copy_metacolumns(self, cursor):
        """Run the configured metadata queries after the main copy."""
        logger.info('Executing post copy metadata queries')
        for metadata_query in self.metadata_queries:
            cursor.execute(metadata_query)
class CopyToTable(luigi.task.MixinNaiveBulkComplete, _MetadataColumnsMixin, luigi.Task):
    """
    An abstract task for inserting a data set into RDBMS.

    Usage:

        Subclass and override the following attributes:

        * `host`,
        * `database`,
        * `user`,
        * `password`,
        * `table`
        * `columns`
        * `port`
    """

    @abc.abstractproperty
    def host(self):
        """Hostname of the database server."""
        return None

    @abc.abstractproperty
    def database(self):
        """Name of the database to write to."""
        return None

    @abc.abstractproperty
    def user(self):
        """User to connect as."""
        return None

    @abc.abstractproperty
    def password(self):
        """Password for the connection."""
        return None

    @abc.abstractproperty
    def table(self):
        """Target table name (optionally schema-qualified)."""
        return None

    @property
    def port(self):
        """Override to connect on a non-default port."""
        return None

    # Columns to insert (same as are returned by `columns`). Overload in
    # subclasses with either plain column names, e.g.
    #     ['id', 'username', 'inserted']
    # or (name, postgres type) tuples, e.g.
    #     [('id', 'SERIAL PRIMARY KEY'), ('username', 'VARCHAR(255)'), ('inserted', 'DATETIME')]
    columns = []

    # options
    null_values = (None,)  # container of values that should be inserted as NULL values
    column_separator = "\t"  # how columns are separated in the file copied into postgres

    def create_table(self, connection):
        """
        Override to provide code for creating the target table.

        By default it will be created using types (optionally) specified in columns.

        If overridden, use the provided connection object for setting up the table in order to
        create the table and insert data using the same transaction.
        """
        first = self.columns[0]
        if len(first) == 1:
            # Only column names were given -- the types cannot be guessed.
            raise NotImplementedError("create_table() not implemented for %r and columns types not specified" % self.table)
        elif len(first) == 2:
            # Columns are (name, type) tuples; build the DDL from them.
            coldefs = ','.join('{name} {type}'.format(name=name, type=type)
                               for name, type in self.columns)
            query = "CREATE TABLE {table} ({coldefs})".format(
                table=self.table, coldefs=coldefs)
            connection.cursor().execute(query)

    @property
    def update_id(self):
        """A unique identifier for this insert on this table."""
        return self.task_id

    @abc.abstractmethod
    def output(self):
        raise NotImplementedError("This method must be overridden")

    def init_copy(self, connection):
        """
        Override to perform custom queries.

        Any code here will be formed in the same transaction as the main copy, just prior to copying data.
        Example use cases include truncating the table or removing all data older than X in the database
        to keep a rolling window of data available in the table.
        """
        # TODO: remove this once users have had enough time to notice the
        # clear_table attribute no longer works
        if hasattr(self, "clear_table"):
            raise Exception("The clear_table attribute has been removed. Override init_copy instead!")

        if self.enable_metadata_columns:
            self._add_metadata_columns(connection.cursor())

    def post_copy(self, connection):
        """
        Override to perform custom queries.

        Any code here will be formed in the same transaction as the main copy, just after copying data.
        Example use cases include cleansing data in temp table prior to insertion into real table.
        """
        pass

    @abc.abstractmethod
    def copy(self, cursor, file):
        raise NotImplementedError("This method must be overridden")
class Query(luigi.task.MixinNaiveBulkComplete, luigi.Task):
    """
    An abstract task for executing an RDBMS query.

    Usage:

        Subclass and override the following attributes:

        * `host`,
        * `database`,
        * `user`,
        * `password`,
        * `table`,
        * `query`

        Optionally override:

        * `port`,
        * `autocommit`
        * `update_id`

        Subclass and override the following methods:

        * `run`
        * `output`
    """

    @abc.abstractproperty
    def host(self):
        """
        Host of the RDBMS. Implementation should support `hostname:port`
        to encode port.
        """
        return None

    @property
    def port(self):
        """
        Override to specify port separately from host.
        """
        return None

    @abc.abstractproperty
    def database(self):
        """Name of the database to query."""
        return None

    @abc.abstractproperty
    def user(self):
        """User to connect as."""
        return None

    @abc.abstractproperty
    def password(self):
        """Password for the connection."""
        return None

    @abc.abstractproperty
    def table(self):
        """Table the query runs against (used for marker bookkeeping)."""
        return None

    @abc.abstractproperty
    def query(self):
        """The SQL statement to execute."""
        return None

    @property
    def autocommit(self):
        """Override to run the query in autocommit mode."""
        return False

    @property
    def update_id(self):
        """
        Override to create a custom marker table 'update_id' signature for Query subclass task instances
        """
        return self.task_id

    @abc.abstractmethod
    def run(self):
        raise NotImplementedError("This method must be overridden")

    @abc.abstractmethod
    def output(self):
        """
        Override with an RDBMS Target (e.g. PostgresTarget or RedshiftTarget) to record execution in a marker table
        """
        raise NotImplementedError("This method must be overridden")
| |
# -*- coding: utf-8 -*-
"""
oauthlib.oauth1.rfc5849.signature
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of `section 3.4`_ of the spec.
Terminology:
* Client: software interfacing with an OAuth API
* Server: the API provider
* Resource Owner: the user who is granting authorization to the client
Steps for signing a request:
1. Collect parameters from the uri query, auth header, & body
2. Normalize those parameters
3. Normalize the uri
4. Pass the normalized uri, normalized parameters, and http method to
construct the base string
5. Pass the base string and any keys needed to a signing function
.. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
"""
from __future__ import absolute_import, unicode_literals
import binascii
import hashlib
import hmac
import logging
from oauthlib.common import (bytes_type, extract_params, safe_string_equals,
unicode_type, urldecode)
from . import utils
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
log = logging.getLogger(__name__)
def construct_base_string(http_method, base_string_uri,
                          normalized_encoded_request_parameters):
    """Build the OAuth 1.0 signature base string.

    Per `section 3.4.1.1`_ of the spec, the base string concatenates, in
    order and separated by "&" (ASCII 38): the uppercased HTTP method,
    the base string URI (Section 3.4.1.2), and the normalized request
    parameters (Section 3.4.1.3.2) -- each percent-encoded per
    Section 3.6.

    For example, the request::

        POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
        Host: example.com
        Content-Type: application/x-www-form-urlencoded
        Authorization: OAuth realm="Example",
            oauth_consumer_key="9djdj82h48djs9d2",
            oauth_token="kkk9d7dh3k39sjv7",
            oauth_signature_method="HMAC-SHA1",
            oauth_timestamp="137131201",
            oauth_nonce="7d8f3e4a",
            oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"

        c2&a3=2+q

    is represented by the base string (line breaks for display only)::

        POST&http%3A%2F%2Fexample.com%2Frequest&a2%3Dr%2520b%26a3%3D2%2520q
        %26a3%3Da%26b5%3D%253D%25253D%26c%2540%3D%26c2%3D%26oauth_consumer_
        key%3D9djdj82h48djs9d2%26oauth_nonce%3D7d8f3e4a%26oauth_signature_m
        ethod%3DHMAC-SHA1%26oauth_timestamp%3D137131201%26oauth_token%3Dkkk
        9d7dh3k39sjv7

    .. _`section 3.4.1.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.1
    """
    parts = (
        # 1. The HTTP request method in uppercase ("HEAD", "GET", "POST",
        #    ...), encoded per Section 3.6.
        utils.escape(http_method.upper()),
        # 3. The base string URI from Section 3.4.1.2, encoded.
        utils.escape(base_string_uri),
        # 5. The request parameters as normalized in Section 3.4.1.3.2,
        #    encoded.
        utils.escape(normalized_encoded_request_parameters),
    )
    # 2. and 4. are the "&" separators (ASCII code 38).
    return '&'.join(parts)
def normalize_base_string_uri(uri, host=None):
    """Build the OAuth base string URI per `section 3.4.1.2`_ of the spec.

    The scheme and authority are lowercased, a missing path becomes "/",
    a default port (http/80, https/443) is stripped, and the query and
    fragment components are discarded.  When *host* is given it replaces
    the netloc parsed out of *uri* (e.g. to honour the HTTP Host header).

    .. _`section 3.4.1.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.2
    """
    if not isinstance(uri, unicode_type):
        raise ValueError('uri must be a unicode object.')

    # FIXME: urlparse does not support unicode
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(uri)

    if not scheme or not netloc:
        raise ValueError('uri must include a scheme and netloc')

    # Per RFC 2616 section 5.1.2 the absolute path cannot be empty; an
    # absent path MUST be given as "/" (the server root).
    path = path or '/'

    # RFC 5849 section 3.4.1.2 item 1: scheme and host MUST be lowercase.
    # Item 2: the host/port must match the request's Host header, so an
    # explicit `host` argument overrides the netloc from the URI.
    scheme = scheme.lower()
    netloc = host.lower() if host is not None else netloc.lower()

    # Item 3: exclude the port when it is the default for the scheme
    # (80 for http per RFC 2616, 443 for https per RFC 2818); any other
    # port stays in the base string URI.
    default_ports = {('http', '80'), ('https', '443')}
    if ':' in netloc:
        hostname, port = netloc.split(':', 1)
        if (scheme, port) in default_ports:
            netloc = hostname

    return urlparse.urlunparse((scheme, netloc, path, params, '', ''))
# ** Request Parameters **
#
# Per `section 3.4.1.3`_ of the spec.
#
# In order to guarantee a consistent and reproducible representation of
# the request parameters, the parameters are collected and decoded to
# their original decoded form. They are then sorted and encoded in a
# particular manner that is often different from their original
# encoding scheme, and concatenated into a single string.
#
# .. _`section 3.4.1.3`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3
def collect_parameters(uri_query='', body=None, headers=None,
                       exclude_oauth_signature=True, with_realm=False):
    """**Parameter Sources**

    Collect the OAuth request parameters per `section 3.4.1.3.1`_ of the
    spec, from three sources:

    * the URI query component, parsed as an
      "application/x-www-form-urlencoded" string (so "+" decodes to a
      space, per W3C.REC-html40-19980424 section 17.13.4),
    * the OAuth "Authorization" header, if present; its "realm"
      parameter is dropped unless *with_realm* is true,
    * the request entity-body, supplied as a dict, a list of 2-tuples,
      or a form-encoded query string.

    Values of parameters whose name starts with ``oauth_`` are
    unescaped.  The ``oauth_signature`` parameter is removed from the
    result unless *exclude_oauth_signature* is false.

    Headers must be supplied as a dict.  Returns a list of (name, value)
    tuples.

    .. _`section 3.4.1.3.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
    """
    # `body` previously defaulted to a shared mutable list ([]); use None
    # as the sentinel to avoid the mutable-default-argument pitfall.
    if body is None:
        body = []
    headers = headers or {}
    params = []

    # * The query component of the HTTP request URI as defined by
    #   RFC 3986 section 3.4, decoded as form-urlencoded pairs.
    if uri_query:
        params.extend(urldecode(uri_query))

    # * The OAuth HTTP "Authorization" header field (section 3.5.1) if
    #   present, excluding the "realm" parameter unless requested.
    if headers:
        headers_lower = dict((k.lower(), v) for k, v in headers.items())
        authorization_header = headers_lower.get('authorization')
        if authorization_header is not None:
            params.extend([i for i in utils.parse_authorization_header(
                authorization_header) if with_realm or i[0] != 'realm'])

    # * The HTTP request entity-body, but only if it is single-part and
    #   form-urlencoded with a matching Content-Type header.
    # TODO: enforce header param inclusion conditions
    bodyparams = extract_params(body) or []
    params.extend(bodyparams)

    # Ensure all oauth params are unescaped.
    unescaped_params = []
    for k, v in params:
        if k.startswith('oauth_'):
            v = utils.unescape(v)
        unescaped_params.append((k, v))

    # The "oauth_signature" parameter MUST be excluded from the signature
    # base string if present.
    if exclude_oauth_signature:
        unescaped_params = list(filter(lambda i: i[0] != 'oauth_signature',
                                       unescaped_params))

    return unescaped_params
def normalize_parameters(params):
    """Normalize request parameters per `section 3.4.1.3.2`_ of the spec.

    Each name and value is percent-encoded, the pairs are sorted by
    encoded name (ties broken by encoded value), each pair is joined as
    ``name=value`` (even when the value is empty), and the pairs are
    concatenated with ``&`` into a single string.

    .. _`section 3.4.1.3.2`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.2
    """
    # Encode before sorting so the ordering is by ascending byte value of
    # the *encoded* names and values, as the spec requires.
    encoded_pairs = sorted(
        (utils.escape(name), utils.escape(value)) for name, value in params)
    # "=" joins name to value (ASCII 61); "&" joins the pairs (ASCII 38).
    return '&'.join('%s=%s' % pair for pair in encoded_pairs)
def sign_hmac_sha1_with_client(base_string, client):
    """Sign *base_string* with HMAC-SHA1 using the client's secrets."""
    return sign_hmac_sha1(
        base_string, client.client_secret, client.resource_owner_secret)
def sign_hmac_sha1(base_string, client_secret, resource_owner_secret):
    """**HMAC-SHA1**

    Sign *base_string* per `section 3.4.2`_ of the spec:
    ``digest = HMAC-SHA1(key, text)`` (RFC 2104), where text is the
    signature base string and key is
    ``escape(client_secret) & escape(resource_owner_secret)``.
    Returns the base64-encoded digest (RFC 2045 section 6.8).

    .. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
    """
    # The "&" separator MUST be present even when either secret is empty.
    key = '&'.join([utils.escape(client_secret or ''),
                    utils.escape(resource_owner_secret or '')])
    # FIXME: HMAC does not support unicode!
    digest = hmac.new(key.encode('utf-8'),
                      base_string.encode('utf-8'),
                      hashlib.sha1).digest()
    # b2a_base64 appends a newline; strip it before decoding.
    return binascii.b2a_base64(digest)[:-1].decode('utf-8')
def sign_hmac_sha256_with_client(base_string, client):
    """Sign *base_string* with HMAC-SHA256 using the client's secrets."""
    return sign_hmac_sha256(
        base_string, client.client_secret, client.resource_owner_secret)
def sign_hmac_sha256(base_string, client_secret, resource_owner_secret):
    """**HMAC-SHA256**

    Sign *base_string* per `section 3.4.2`_ of the spec:
    ``digest = HMAC-SHA256(key, text)`` (RFC 4634), where text is the
    signature base string and key is
    ``escape(client_secret) & escape(resource_owner_secret)``.
    Returns the base64-encoded digest (RFC 2045 section 6.8).

    .. _`section 3.4.2`: https://tools.ietf.org/html/rfc5849#section-3.4.2
    """
    # The "&" separator MUST be present even when either secret is empty.
    key = '&'.join([utils.escape(client_secret or ''),
                    utils.escape(resource_owner_secret or '')])
    # FIXME: HMAC does not support unicode!
    digest = hmac.new(key.encode('utf-8'),
                      base_string.encode('utf-8'),
                      hashlib.sha256).digest()
    # b2a_base64 appends a newline; strip it before decoding.
    return binascii.b2a_base64(digest)[:-1].decode('utf-8')
# Cached module-level jwt RSA-SHA1 algorithm instance (created lazily).
_jwtrs1 = None


# jwt has some nice pycrypto/cryptography abstractions
def _jwt_rs1_signing_algorithm():
    """Return the shared jwt RSA-SHA1 algorithm object, creating it lazily.

    The import lives inside the function so the jwt/cryptography
    dependencies are only required when RSA signing is actually used.
    """
    global _jwtrs1
    if _jwtrs1 is None:
        import jwt.algorithms as jwtalgo
        _jwtrs1 = jwtalgo.RSAAlgorithm(jwtalgo.hashes.SHA1)
    return _jwtrs1
def sign_rsa_sha1(base_string, rsa_private_key):
    """**RSA-SHA1**

    Sign *base_string* with RSASSA-PKCS1-v1_5 using SHA-1 as the hash
    function, per `section 3.4.3`_ of the spec (RFC 3447 section 8.2,
    a.k.a. PKCS#1), and return the base64-encoded signature.  Requires
    the jwt and cryptography libraries.

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
    """
    if isinstance(base_string, unicode_type):
        base_string = base_string.encode('utf-8')
    # TODO: finish RSA documentation
    alg = _jwt_rs1_signing_algorithm()
    signature = alg.sign(base_string, _prepare_key_plus(alg, rsa_private_key))
    # b2a_base64 appends a newline; strip it before decoding.
    return binascii.b2a_base64(signature)[:-1].decode('utf-8')
def sign_rsa_sha1_with_client(base_string, client):
    """Sign *base_string* with RSA-SHA1 using the client's RSA key.

    Raises ValueError when the client has no RSA key configured.
    """
    if not client.rsa_key:
        raise ValueError('rsa_key is required when using RSA signature method.')
    return sign_rsa_sha1(base_string, client.rsa_key)
def sign_plaintext(client_secret, resource_owner_secret):
    """Sign a request using plaintext.

    Per `section 3.4.4`_ of the spec: the signature is simply the two
    encoded secrets joined by "&" (which MUST appear even when either
    secret is empty).  PLAINTEXT does not use the signature base string
    or the oauth_timestamp/oauth_nonce parameters, and MUST only be used
    over a secure transport such as TLS/SSL.

    .. _`section 3.4.4`: https://tools.ietf.org/html/rfc5849#section-3.4.4
    """
    return '&'.join([utils.escape(client_secret or ''),
                     utils.escape(resource_owner_secret or '')])
def sign_plaintext_with_client(base_string, client):
    """PLAINTEXT-sign using the client's secrets.

    *base_string* is accepted for signature-method interface parity but
    is not used by the PLAINTEXT method.
    """
    return sign_plaintext(client.client_secret, client.resource_owner_secret)
def verify_hmac_sha1(request, client_secret=None,
                     resource_owner_secret=None):
    """Verify a HMAC-SHA1 signature.

    Per `section 3.4`_ of the spec.  To satisfy RFC 2616 section 5.2
    item 1, ``request.uri`` MUST be an absolute URI whose netloc
    identifies the origin server; any Host entry in ``request.headers``
    is ignored.

    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
    """
    # Recompute the signature from the request's own parameters and
    # compare it to the transmitted one with a timing-safe comparison.
    norm_params = normalize_parameters(request.params)
    uri = normalize_base_string_uri(request.uri)
    base_string = construct_base_string(request.http_method, uri, norm_params)
    expected = sign_hmac_sha1(base_string, client_secret,
                              resource_owner_secret)
    match = safe_string_equals(expected, request.signature)
    if not match:
        log.debug('Verify HMAC-SHA1 failed: sig base string: %s', base_string)
    return match
def _prepare_key_plus(alg, keystr):
    """Run *keystr* through ``alg.prepare_key``, decoding bytes to text first."""
    if isinstance(keystr, bytes_type):
        keystr = keystr.decode('utf-8')
    return alg.prepare_key(keystr)
def verify_rsa_sha1(request, rsa_public_key):
    """Verify a RSASSA-PKCS #1 v1.5 base64 encoded signature.

    Per `section 3.4.3`_ of the spec.  Requires the jwt and cryptography
    libraries.  To satisfy RFC 2616 section 5.2 item 1, ``request.uri``
    MUST be an absolute URI whose netloc identifies the origin server;
    any Host entry in ``request.headers`` is ignored.

    .. _`section 3.4.3`: https://tools.ietf.org/html/rfc5849#section-3.4.3
    """
    norm_params = normalize_parameters(request.params)
    uri = normalize_base_string_uri(request.uri)
    message = construct_base_string(
        request.http_method, uri, norm_params).encode('utf-8')
    # The transmitted signature is base64; decode before verification.
    sig = binascii.a2b_base64(request.signature.encode('utf-8'))
    alg = _jwt_rs1_signing_algorithm()
    verify_ok = alg.verify(message, _prepare_key_plus(alg, rsa_public_key), sig)
    if not verify_ok:
        log.debug('Verify RSA-SHA1 failed: sig base string: %s', message)
    return verify_ok
def verify_plaintext(request, client_secret=None, resource_owner_secret=None):
    """Verify a PLAINTEXT signature.

    Per `section 3.4`_ of the spec.

    .. _`section 3.4`: https://tools.ietf.org/html/rfc5849#section-3.4
    """
    # Rebuild the expected plaintext signature and compare it to the
    # transmitted one with a timing-safe comparison.
    expected = sign_plaintext(client_secret, resource_owner_secret)
    match = safe_string_equals(expected, request.signature)
    if not match:
        log.debug('Verify PLAINTEXT failed')
    return match
| |
# -*- coding: utf-8 -*-
import json
import mimetypes
import os
from datetime import datetime
from zipfile import ZipFile
from django import forms
from django.conf import settings
from django.core.validators import URLValidator
from django.forms import widgets
from django.forms.extras.widgets import SelectDateWidget
from django.forms.models import modelformset_factory
from django.template.defaultfilters import filesizeformat
from django.utils import six
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real as translation
import commonware
import happyforms
import waffle
from jinja2 import escape as jinja2_escape
from jinja2.filters import do_dictsort
from mpconstants import regions as mpconstants_regions
from quieter_formset.formset import BaseModelFormSet
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
import lib.iarc
import mkt
from lib.video import tasks as vtasks
from mkt import get_user
from mkt.access import acl
from mkt.api.models import Access
from mkt.constants import (CATEGORY_CHOICES, MAX_PACKAGED_APP_SIZE,
ratingsbodies)
from mkt.developers.utils import prioritize_app
from mkt.files.models import FileUpload
from mkt.files.utils import WebAppParser
from mkt.regions import REGIONS_CHOICES_SORTED_BY_NAME
from mkt.regions.utils import parse_region
from mkt.reviewers.models import RereviewQueue
from mkt.site.fields import SeparatedValuesField
from mkt.site.forms import AddonChoiceField
from mkt.site.utils import remove_icons, slug_validator, slugify
from mkt.tags.models import Tag
from mkt.tags.utils import can_edit_restricted_tags, clean_tags
from mkt.translations.fields import TransField
from mkt.translations.forms import TranslationFormMixin
from mkt.translations.models import Translation
from mkt.translations.widgets import TranslationTextarea, TransTextarea
from mkt.versions.models import Version
from mkt.webapps.models import (AddonUser, BlockedSlug, IARCInfo, Preview,
Webapp)
from mkt.webapps.tasks import (index_webapps, set_storefront_data,
update_manifests)
from . import tasks
# Module-level logger for the mkt.developers app.
log = commonware.log.getLogger('mkt.developers')
def region_error(region):
    """Return a ValidationError saying *region* cannot be selected."""
    region_name = unicode(parse_region(region).name)
    return forms.ValidationError(
        _('You cannot select {region}.').format(region=region_name))
def toggle_app_for_special_regions(request, app, enabled_regions=None):
    """Toggle for special regions (e.g., China).

    For each special region, sync the app's per-region status and its
    exclusion record with the developer's selection:

    * region id in ``enabled_regions``: mark the region PENDING (unless
      already PUBLIC) and stamp its nomination date;
    * region id not in ``enabled_regions``: reset the region status to
      NULL;
    * independently of the above, a region whose status is PUBLIC has
      its exclusion record removed, and any other region gets one
      created.

    No-op unless the 'special-regions' waffle flag is active; when
    ``enabled_regions`` is None, statuses are left untouched and only
    the exclusion-record sync runs.
    """
    if not waffle.flag_is_active(request, 'special-regions'):
        return
    for region in mkt.regions.SPECIAL_REGIONS:
        status = app.geodata.get_status(region)

        if enabled_regions is not None:
            if region.id in enabled_regions:
                # If it's not already enabled, mark as pending.
                if status != mkt.STATUS_PUBLIC:
                    # Developer requested for it to be in China.
                    status = mkt.STATUS_PENDING
                    value, changed = app.geodata.set_status(region, status)
                    if changed:
                        log.info(u'[Webapp:%s] App marked as pending '
                                 u'special region (%s).' % (app, region.slug))
                        value, changed = app.geodata.set_nominated_date(
                            region, save=True)
                        log.info(u'[Webapp:%s] Setting nomination date to '
                                 u'now for region (%s).' % (app, region.slug))
            else:
                # Developer cancelled request for approval.
                status = mkt.STATUS_NULL
                value, changed = app.geodata.set_status(
                    region, status, save=True)
                if changed:
                    log.info(u'[Webapp:%s] App marked as null special '
                             u'region (%s).' % (app, region.slug))

        if status == mkt.STATUS_PUBLIC:
            # Reviewer approved for it to be in China.
            aer = app.addonexcludedregion.filter(region=region.id)
            if aer.exists():
                aer.delete()
                log.info(u'[Webapp:%s] App included in new special '
                         u'region (%s).' % (app, region.slug))
        else:
            # Developer requested for it to be in China.
            aer, created = app.addonexcludedregion.get_or_create(
                region=region.id)
            if created:
                log.info(u'[Webapp:%s] App excluded from new special '
                         u'region (%s).' % (app, region.slug))
class AuthorForm(happyforms.ModelForm):
    """Form for editing a single team member (AddonUser) of an app."""

    class Meta:
        model = AddonUser
        exclude = ('addon',)

    def clean_user(self):
        """Reject users who have not accepted the developer agreement."""
        user = self.cleaned_data['user']
        if not user.read_dev_agreement:
            raise forms.ValidationError(
                _('All team members must have read and agreed to the '
                  'developer agreement.'))
        return user
class BaseModelFormSet(BaseModelFormSet):
    """
    Override the parent's is_valid to prevent deleting all forms.
    """

    def is_valid(self):
        # The parent skips clean() when every row is marked for deletion,
        # so additionally require that no per-form or non-form errors
        # exist — deleting everything is never allowed.
        parent_ok = super(BaseModelFormSet, self).is_valid()
        return (parent_ok and not any(self.errors) and
                not bool(self.non_form_errors()))
class BaseAuthorFormSet(BaseModelFormSet):
    """Formset validating the team-member list as a whole."""

    def clean(self):
        if any(self.errors):
            return
        # cleaned_data could be None if it's the empty extra form.
        rows = [form.cleaned_data for form in self.forms
                if not form.cleaned_data.get('DELETE', False)]
        rows = filter(None, rows)
        if not any(row['role'] == mkt.AUTHOR_ROLE_OWNER for row in rows):
            raise forms.ValidationError(_('Must have at least one owner.'))
        if not any(row['listed'] for row in rows):
            raise forms.ValidationError(
                _('At least one team member must be listed.'))
        users = [row['user'] for row in rows]
        if sorted(users) != sorted(set(users)):
            raise forms.ValidationError(
                _('A team member can only be listed once.'))
# Formset of AuthorForm rows; BaseAuthorFormSet validates the list as a
# whole, and extra=0 renders no blank row by default.
AuthorFormSet = modelformset_factory(AddonUser, formset=BaseAuthorFormSet,
                                     form=AuthorForm, can_delete=True, extra=0)
class DeleteForm(happyforms.Form):
    """Form with an optional free-text reason, bound straight to POST data."""
    reason = forms.CharField(required=False)

    def __init__(self, request):
        # Binds directly to request.POST instead of taking data/kwargs.
        super(DeleteForm, self).__init__(request.POST)
def trap_duplicate(request, manifest_url):
    """Return a warning (HTML) message if this user already submitted an
    app with *manifest_url*, linking to that app's dev page; otherwise None.
    """
    # See if this user has any other apps with the same manifest.
    owned = (request.user.addonuser_set
             .filter(addon__manifest_url=manifest_url))
    if not owned:
        return
    try:
        app = owned[0].addon
    except Webapp.DoesNotExist:
        return
    error_url = app.get_dev_url()
    # Pick the message matching the existing app's status; fall back to
    # the developer-disabled message when no status matches.
    messages_by_status = {
        mkt.STATUS_PUBLIC: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently public. '
            '<a href="%s">Edit app</a>'),
        mkt.STATUS_PENDING: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently pending. '
            '<a href="%s">Edit app</a>'),
        mkt.STATUS_NULL: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently incomplete. '
            '<a href="%s">Resume app</a>'),
        mkt.STATUS_REJECTED: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently rejected. '
            '<a href="%s">Edit app</a>'),
        mkt.STATUS_DISABLED: _(
            u'Oops, looks like you already submitted that manifest '
            'for %s, which is currently banned on Marketplace. '
            '<a href="%s">Edit app</a>'),
    }
    msg = messages_by_status.get(app.status)
    if msg is None and app.disabled_by_user:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently disabled. '
                '<a href="%s">Edit app</a>')
    if msg:
        return msg % (jinja2_escape(app.name), error_url)
def verify_app_domain(manifest_url, exclude=None, packaged=False):
    """Raise ValidationError if another app already lives on this domain.

    The check applies to packaged apps, or whenever the
    'webapps-unique-by-domain' waffle switch is on.  *exclude* lets a
    caller skip its own app when re-validating.
    """
    if not (packaged or waffle.switch_is_active('webapps-unique-by-domain')):
        return
    domain = Webapp.domain_from_url(manifest_url)
    existing = Webapp.objects.filter(app_domain=domain)
    if exclude:
        existing = existing.exclude(pk=exclude.pk)
    if existing.exists():
        raise forms.ValidationError(
            _('An app already exists on this domain; '
              'only one app per domain is allowed.'))
class PreviewForm(happyforms.ModelForm):
    """Form for a single app preview (screenshot or video) upload."""
    file_upload = forms.FileField(required=False)
    upload_hash = forms.CharField(required=False)
    # This lets us POST the data URIs of the unsaved previews so we can still
    # show them if there were form errors.
    unsaved_image_data = forms.CharField(required=False,
                                         widget=forms.HiddenInput)
    unsaved_image_type = forms.CharField(required=False,
                                         widget=forms.HiddenInput)

    def save(self, addon, commit=True):
        # Attach the preview to its app, honour deletion requests, then
        # dispatch the appropriate async resize task for a fresh upload.
        if self.cleaned_data:
            self.instance.addon = addon
            if self.cleaned_data.get('DELETE'):
                # Existing preview.
                if self.instance.id:
                    self.instance.delete()
                # User has no desire to save this preview.
                return

            super(PreviewForm, self).save(commit=commit)
            if self.cleaned_data['upload_hash']:
                upload_hash = self.cleaned_data['upload_hash']
                upload_path = os.path.join(settings.TMP_PATH, 'preview',
                                           upload_hash)
                # The content type is carried in the upload hash's file
                # extension with '/' flattened to '-' (e.g. 'video-webm').
                filetype = (os.path.splitext(upload_hash)[1][1:]
                            .replace('-', '/'))
                if filetype in mkt.VIDEO_TYPES:
                    self.instance.update(filetype=filetype)
                    vtasks.resize_video.delay(upload_path, self.instance.pk,
                                              user_pk=mkt.get_user().pk,
                                              set_modified_on=[self.instance])
                else:
                    # Anything that is not a known video type is treated
                    # as an image and normalized to PNG by the task.
                    self.instance.update(filetype='image/png')
                    tasks.resize_preview.delay(upload_path, self.instance.pk,
                                               set_modified_on=[self.instance])

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'id', 'position')
class JSONField(forms.Field):
    """Field that decodes a JSON-encoded string; other values pass through."""

    def to_python(self, value):
        # An empty string means "no value".
        if value == '':
            return None
        if isinstance(value, basestring):
            try:
                return json.loads(value)
            except ValueError:
                # Not valid JSON: fall through and return the raw string.
                pass
        return value
class JSONMultipleChoiceField(forms.MultipleChoiceField, JSONField):
    """MultipleChoiceField whose raw value may arrive JSON-encoded
    (decoded by JSONField.to_python), rendered as checkboxes."""
    widget = forms.CheckboxSelectMultiple
class AdminSettingsForm(PreviewForm):
    """Admin-only app settings form.

    Despite subclassing PreviewForm (its ModelForm instance is the app's
    promo `Preview`), it also updates fields on the app itself
    (vip_app, priority_review, mozilla_contact) and on its geodata
    (banner_regions, banner_message).
    """
    DELETE = forms.BooleanField(required=False)
    mozilla_contact = SeparatedValuesField(forms.EmailField, separator=',',
                                           required=False)
    vip_app = forms.BooleanField(required=False)
    priority_review = forms.BooleanField(required=False)
    banner_regions = JSONMultipleChoiceField(
        required=False, choices=mkt.regions.REGIONS_CHOICES_NAME)
    banner_message = TransField(required=False)

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'position')

    def __init__(self, *args, **kw):
        # Note that this form is not inheriting from AddonFormBase, so we have
        # to get rid of 'version' ourselves instead of letting the parent class
        # do it.
        kw.pop('version', None)

        # Get the object for the app's promo `Preview` and pass it to the form.
        # NOTE(review): `addon` is only bound when 'instance' is passed;
        # the self.initial assignments below would raise NameError
        # otherwise — confirm all callers supply instance.
        if kw.get('instance'):
            addon = kw.pop('instance')
            self.instance = addon
            self.promo = addon.get_promo()

        self.request = kw.pop('request', None)

        # Note: After calling `super`, `self.instance` becomes the `Preview`
        # object.
        super(AdminSettingsForm, self).__init__(*args, **kw)

        self.initial['vip_app'] = addon.vip_app
        self.initial['priority_review'] = addon.priority_review

        if self.instance:
            self.initial['mozilla_contact'] = addon.mozilla_contact
            self.initial['banner_regions'] = addon.geodata.banner_regions or []
            self.initial['banner_message'] = addon.geodata.banner_message_id

    @property
    def regions_by_id(self):
        # Mapping of region id -> region, for template lookups.
        return mkt.regions.REGIONS_CHOICES_ID_DICT

    def clean_position(self):
        # The promo preview always lives at position -1.
        return -1

    def clean_banner_regions(self):
        try:
            regions = map(int, self.cleaned_data.get('banner_regions'))
        except (TypeError, ValueError):
            # input data is not a list or data contains non-integers.
            raise forms.ValidationError(_('Invalid region(s) selected.'))
        return list(regions)

    def clean_mozilla_contact(self):
        contact = self.cleaned_data.get('mozilla_contact')
        # mozilla_contact is stored as a string; normalize missing to u''.
        if self.cleaned_data.get('mozilla_contact') is None:
            return u''
        return contact

    def save(self, addon, commit=True):
        # Promo handling: delete on explicit DELETE (without a new
        # upload), replace when a new upload arrives, otherwise save the
        # uploaded promo via PreviewForm.save().
        if (self.cleaned_data.get('DELETE') and
                'upload_hash' not in self.changed_data and self.promo.id):
            self.promo.delete()
        elif self.promo and 'upload_hash' in self.changed_data:
            self.promo.delete()
        elif self.cleaned_data.get('upload_hash'):
            super(AdminSettingsForm, self).save(addon, True)

        updates = {
            'vip_app': self.cleaned_data.get('vip_app'),
        }
        contact = self.cleaned_data.get('mozilla_contact')
        if contact is not None:
            updates['mozilla_contact'] = contact
        if (self.cleaned_data.get('priority_review') and
                not addon.priority_review):
            # addon.priority_review gets updated within prioritize_app().
            prioritize_app(addon, self.request.user)
        else:
            updates['priority_review'] = self.cleaned_data.get(
                'priority_review')
        addon.update(**updates)

        geodata = addon.geodata
        geodata.banner_regions = self.cleaned_data.get('banner_regions')
        geodata.banner_message = self.cleaned_data.get('banner_message')
        geodata.save()

        # NOTE(review): no 'flash' field is declared on this form, so
        # this is always None here unless a subclass adds one — confirm.
        uses_flash = self.cleaned_data.get('flash')
        af = addon.get_latest_file()
        if af is not None:
            af.update(uses_flash=bool(uses_flash))

        index_webapps.delay([addon.id])

        return addon
class BasePreviewFormSet(BaseModelFormSet):
    """Formset requiring at least one kept (non-deleted) preview upload."""

    def clean(self):
        if any(self.errors):
            return
        # A form counts only if it is not marked for deletion and
        # actually carries an upload hash.
        has_kept_upload = any(
            not form.cleaned_data.get('DELETE') and
            form.cleaned_data.get('upload_hash') is not None
            for form in self.forms)
        if not has_kept_upload:
            raise forms.ValidationError(
                _('You must upload at least one screenshot or video.'))
# Formset of PreviewForm rows; BasePreviewFormSet enforces that at least
# one non-deleted upload remains, and extra=1 renders one blank row.
PreviewFormSet = modelformset_factory(Preview, formset=BasePreviewFormSet,
                                      form=PreviewForm, can_delete=True,
                                      extra=1)
class NewManifestForm(happyforms.Form):
    """Form accepting the manifest URL for a new hosted-app submission."""
    manifest = forms.URLField()

    def __init__(self, *args, **kwargs):
        # is_standalone is set by the standalone validator tool, which
        # skips the duplicate-domain check in clean_manifest().
        self.is_standalone = kwargs.pop('is_standalone', False)
        super(NewManifestForm, self).__init__(*args, **kwargs)

    def clean_manifest(self):
        manifest = self.cleaned_data['manifest']
        # Skip checking the domain for the standalone validator.
        if not self.is_standalone:
            verify_app_domain(manifest)
        return manifest
class NewPackagedAppForm(happyforms.Form):
    """Form handling the upload of a new packaged-app zip file.

    Validates the package size, the presence/parseability of
    manifest.webapp, and (when the manifest declares an origin) domain
    uniqueness.  All validation messages are persisted on a FileUpload
    instance exposed as ``self.file_upload`` so the view can report them.
    """
    upload = forms.FileField()

    def __init__(self, *args, **kwargs):
        self.max_size = kwargs.pop('max_size', MAX_PACKAGED_APP_SIZE)
        self.user = kwargs.pop('user', get_user())
        self.addon = kwargs.pop('addon', None)
        # Set by clean_upload() whether validation succeeds or fails.
        self.file_upload = None
        super(NewPackagedAppForm, self).__init__(*args, **kwargs)

    def clean_upload(self):
        upload = self.cleaned_data['upload']
        errors = []

        if upload.size > self.max_size:
            errors.append({
                'type': 'error',
                'message': _('Packaged app too large for submission. Packages '
                             'must be smaller than %s.' % filesizeformat(
                                 self.max_size)),
                'tier': 1,
            })
            # Immediately raise an error, do not process the rest of the view,
            # which would read the file.
            raise self.persist_errors(errors, upload)

        manifest = None
        try:
            # Be careful to keep this as in-memory zip reading.
            manifest = ZipFile(upload, 'r').read('manifest.webapp')
        # The exception object was previously bound but never used.
        except Exception:
            errors.append({
                'type': 'error',
                'message': _('Error extracting manifest from zip file.'),
                'tier': 1,
            })

        origin = None
        if manifest:
            try:
                origin = WebAppParser.decode_manifest(manifest).get('origin')
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if origin:
            try:
                verify_app_domain(origin, packaged=True, exclude=self.addon)
            # Fixed: this handler used the Py2-only `except X, e` comma
            # syntax, inconsistent with the `as e` form above and a
            # syntax error under Python 3.
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if errors:
            raise self.persist_errors(errors, upload)

        # Everything passed validation.
        self.file_upload = FileUpload.from_post(
            upload, upload.name, upload.size, user=self.user)

    def persist_errors(self, errors, upload):
        """
        Persist the error with this into FileUpload (but do not persist
        the file contents, which are too large) and return a ValidationError.
        """
        validation = {
            'errors': len(errors),
            'success': False,
            'messages': errors,
        }
        self.file_upload = FileUpload.objects.create(
            user=self.user, name=getattr(upload, 'name', ''),
            validation=json.dumps(validation))
        # Return a ValidationError to be raised by the view.
        return forms.ValidationError(' '.join(e['message'] for e in errors))
class AddonFormBase(TranslationFormMixin, happyforms.ModelForm):
    # Base class for the app-edit forms below: stashes the request and the
    # optional version being edited so subclasses can use them in
    # clean()/save().
    def __init__(self, *args, **kw):
        self.request = kw.pop('request')
        self.version = kw.pop('version', None)
        super(AddonFormBase, self).__init__(*args, **kw)
    class Meta:
        # NOTE(review): `models` looks like a typo for `model` -- as written
        # this Meta declares no model. Subclasses define their own Meta, so
        # confirm nothing relies on this before "fixing" it: setting a model
        # here would make Django validate `fields` against Webapp.
        models = Webapp
        fields = ('name', 'slug')
class AppFormBasic(AddonFormBase):
    """Form to edit basic app info (slug, manifest URL, description, tags)."""
    slug = forms.CharField(max_length=30, widget=forms.TextInput)
    manifest_url = forms.URLField()
    description = TransField(
        required=True,
        label=_lazy(u'Provide a detailed description of your app'),
        help_text=_lazy(u'This description will appear on the details page.'),
        widget=TransTextarea)
    tags = forms.CharField(
        label=_lazy(u'Search Keywords:'), required=False,
        widget=forms.Textarea(attrs={'rows': 3}),
        help_text=_lazy(
            u'The search keywords are used to return search results in the '
            u'Firefox Marketplace. Be sure to include a keywords that '
            u'accurately reflect your app.'))
    class Meta:
        model = Webapp
        fields = ('slug', 'manifest_url', 'description', 'tags')
    def __init__(self, *args, **kw):
        # Force the form to use app_slug. We want to keep
        # this under "slug" so all the js continues to work.
        kw.setdefault('initial', {})['slug'] = kw['instance'].app_slug
        super(AppFormBasic, self).__init__(*args, **kw)
        # Remember the pre-edit manifest URL so save() can log/flag changes.
        self.old_manifest_url = self.instance.manifest_url
        if self.instance.is_packaged:
            # Manifest URL cannot be changed for packaged apps.
            del self.fields['manifest_url']
        self.initial['tags'] = ', '.join(self.get_tags(self.instance))
    def clean_tags(self):
        return clean_tags(self.request, self.cleaned_data['tags'])
    def get_tags(self, addon):
        # Restricted tags are only visible/editable to privileged users.
        if can_edit_restricted_tags(self.request):
            return list(addon.tags.values_list('tag_text', flat=True))
        else:
            return list(addon.tags.filter(restricted=False)
                        .values_list('tag_text', flat=True))
    def _post_clean(self):
        # Switch slug to app_slug in cleaned_data and self._meta.fields so
        # we can update the app_slug field for webapps.
        try:
            self._meta.fields = list(self._meta.fields)
            slug_idx = self._meta.fields.index('slug')
            data = self.cleaned_data
            if 'slug' in data:
                data['app_slug'] = data.pop('slug')
            self._meta.fields[slug_idx] = 'app_slug'
            super(AppFormBasic, self)._post_clean()
        finally:
            # Always restore the meta fields, even if validation raised, so
            # the form class is not left in a mutated state.
            self._meta.fields[slug_idx] = 'slug'
    def clean_slug(self):
        slug = self.cleaned_data['slug']
        slug_validator(slug, lower=False)
        if slug != self.instance.app_slug:
            if Webapp.objects.filter(app_slug=slug).exists():
                raise forms.ValidationError(
                    _('This slug is already in use. Please choose another.'))
            # NOTE(review): the slug is interpolated before translation
            # lookup here; confirm the msgid matches the catalogs.
            if BlockedSlug.blocked(slug):
                raise forms.ValidationError(_('The slug cannot be "%s". '
                                              'Please choose another.' % slug))
        return slug.lower()
    def clean_manifest_url(self):
        manifest_url = self.cleaned_data['manifest_url']
        # Only verify if manifest changed.
        if 'manifest_url' in self.changed_data:
            verify_app_domain(manifest_url, exclude=self.instance)
        return manifest_url
    def save(self, addon, commit=False):
        # We ignore `commit`, since we need it to be `False` so we can save
        # the ManyToMany fields on our own.
        addonform = super(AppFormBasic, self).save(commit=False)
        addonform.save()
        if 'manifest_url' in self.changed_data:
            before_url = self.old_manifest_url
            after_url = self.cleaned_data['manifest_url']
            # If a non-admin edited the manifest URL, add to Re-review Queue.
            if not acl.action_allowed(self.request, 'Admin', '%'):
                log.info(u'[Webapp:%s] (Re-review) Manifest URL changed '
                         u'from %s to %s'
                         % (self.instance, before_url, after_url))
                msg = (_(u'Manifest URL changed from {before_url} to '
                         u'{after_url}')
                       .format(before_url=before_url, after_url=after_url))
                RereviewQueue.flag(self.instance,
                                   mkt.LOG.REREVIEW_MANIFEST_URL_CHANGE, msg)
            # Refetch the new manifest.
            log.info('Manifest %s refreshed for %s'
                     % (addon.manifest_url, addon))
            update_manifests.delay([self.instance.id])
        # Diff the tag sets and apply only the changes.
        tags_new = self.cleaned_data['tags']
        tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]
        add_tags = set(tags_new) - set(tags_old)
        del_tags = set(tags_old) - set(tags_new)
        # Add new tags.
        for t in add_tags:
            Tag(tag_text=t).save_tag(addon)
        # Remove old tags.
        for t in del_tags:
            Tag(tag_text=t).remove_tag(addon)
        return addonform
class AppFormDetails(AddonFormBase):
    # Form to edit app details: default locale, homepage and privacy policy.
    LOCALES = [(translation.to_locale(k).replace('_', '-'), v)
               for k, v in do_dictsort(settings.LANGUAGES)]
    default_locale = forms.TypedChoiceField(required=False, choices=LOCALES)
    homepage = TransField.adapt(forms.URLField)(required=False)
    privacy_policy = TransField(
        widget=TransTextarea(), required=True,
        label=_lazy(u"Please specify your app's Privacy Policy"))
    class Meta:
        model = Webapp
        fields = ('default_locale', 'homepage', 'privacy_policy')
    def clean(self):
        # Make sure we have the required translations in the new locale.
        required = ['name', 'description']
        data = self.cleaned_data
        if not self.errors and 'default_locale' in self.changed_data:
            # Map required field name -> translation id on the instance.
            fields = dict((k, getattr(self.instance, k + '_id'))
                          for k in required)
            locale = data['default_locale']
            # Drop fields that have no translation id at all.
            ids = filter(None, fields.values())
            qs = (Translation.objects.filter(locale=locale, id__in=ids,
                                             localized_string__isnull=False)
                  .values_list('id', flat=True))
            missing = [k for k, v in fields.items() if v not in qs]
            if missing:
                raise forms.ValidationError(
                    _('Before changing your default locale you must have a '
                      'name and description in that locale. '
                      'You are missing %s.') % ', '.join(map(repr, missing)))
        return data
class AppFormMedia(AddonFormBase):
    # Form for the app icon: the uploaded file lives in a temp location
    # keyed by hash until save() kicks off the resize task.
    icon_upload_hash = forms.CharField(required=False)
    unsaved_icon_data = forms.CharField(required=False,
                                        widget=forms.HiddenInput)
    class Meta:
        model = Webapp
        fields = ('icon_upload_hash', 'icon_type')
    def save(self, addon, commit=True):
        if self.cleaned_data['icon_upload_hash']:
            upload_hash = self.cleaned_data['icon_upload_hash']
            upload_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
            dirname = addon.get_icon_dir()
            destination = os.path.join(dirname, '%s' % addon.id)
            # Clear out the old icons before resizing the new one into place
            # (resize runs asynchronously).
            remove_icons(destination)
            tasks.resize_icon.delay(upload_path, destination,
                                    mkt.CONTENT_ICON_SIZES,
                                    set_modified_on=[addon])
        return super(AppFormMedia, self).save(commit)
class AppSupportFormMixin(object):
    """Mixin enforcing that at least one support contact is provided."""

    def get_default_translation_for(self, field_name):
        """Return cleaned_data for *field_name* in the field's default
        locale, or '' when absent."""
        locale = self.fields[field_name].default_locale
        translations = self.cleaned_data.get(field_name, {})
        return translations.get(locale, '')

    def clean_support_fields(self):
        """Make sure either support email or support url are present."""
        already_invalid = ('support_email' in self._errors or
                          'support_url' in self._errors)
        if already_invalid:
            # At least one of the fields was filled but failed its own
            # validation; the user just needs to correct that error.
            return
        email = self.get_default_translation_for('support_email')
        url = self.get_default_translation_for('support_url')
        if email or url:
            return
        # Surface a single message under a synthetic 'support' key for the
        # template, and flag both real fields without repeating the text.
        self._errors['support'] = self.error_class(
            [_('You must provide either a website, an email, or both.')])
        self._errors['support_email'] = self.error_class([''])
        self._errors['support_url'] = self.error_class([''])

    def clean(self):
        data = super(AppSupportFormMixin, self).clean()
        self.clean_support_fields()
        return data
class AppFormSupport(AppSupportFormMixin, AddonFormBase):
    # Support contact form; the mixin's clean() enforces that at least one
    # of the two fields is filled in.
    support_url = TransField.adapt(forms.URLField)(required=False)
    support_email = TransField.adapt(forms.EmailField)(required=False)
    class Meta:
        model = Webapp
        fields = ('support_email', 'support_url')
class AppAppealForm(happyforms.Form):
    """
    If a developer's app is rejected he can make changes and request
    another review.
    """
    notes = forms.CharField(
        label=_lazy(u'Your comments'),
        required=False, widget=forms.Textarea(attrs={'rows': 2}))

    def __init__(self, *args, **kw):
        self.product = kw.pop('product', None)
        super(AppAppealForm, self).__init__(*args, **kw)

    def save(self):
        version = self.product.versions.latest()
        comments = self.cleaned_data['notes']
        # Attach the developer's comments to the log entry only when given.
        extra = {'details': {'comments': comments}} if comments else {}
        mkt.log(mkt.LOG.WEBAPP_RESUBMIT, self.product, version, **extra)
        # Mark app and file as pending again.
        self.product.update(status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        version.all_files[0].update(status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        return version
class PublishForm(happyforms.Form):
    # Publish choice wording is slightly different here than with the
    # submission flow because the app may have already been published.
    mark_safe_lazy = lazy(mark_safe, six.text_type)
    PUBLISH_CHOICES = (
        (mkt.PUBLISH_IMMEDIATE,
         mark_safe_lazy(_lazy(
             u'<b>Published</b>: Visible to everyone in the Marketplace and '
             u'included in search results and listing pages.'))),
        (mkt.PUBLISH_HIDDEN,
         mark_safe_lazy(_lazy(
             u'<b>Unlisted</b>: Visible to only people with the URL and '
             u'does not appear in search results and listing pages.'))),
    )
    # Used for setting initial form values.
    PUBLISH_MAPPING = {
        mkt.STATUS_PUBLIC: mkt.PUBLISH_IMMEDIATE,
        mkt.STATUS_UNLISTED: mkt.PUBLISH_HIDDEN,
        mkt.STATUS_APPROVED: mkt.PUBLISH_PRIVATE,
    }
    # Use in form processing to set status.
    STATUS_MAPPING = dict((v, k) for k, v in PUBLISH_MAPPING.items())
    publish_type = forms.TypedChoiceField(
        required=False, choices=PUBLISH_CHOICES, widget=forms.RadioSelect(),
        initial=0, coerce=int, label=_lazy('App Visibility:'))
    # PUBLISH_PRIVATE has no radio button of its own: it is represented as
    # "Unlisted" plus this checkbox (see __init__/save below).
    limited = forms.BooleanField(
        required=False, label=_lazy(
            u'<b>Limit to my team</b>: Visible to only Team Members.'))
    def __init__(self, *args, **kwargs):
        self.addon = kwargs.pop('addon')
        super(PublishForm, self).__init__(*args, **kwargs)
        limited = False
        publish = self.PUBLISH_MAPPING.get(self.addon.status,
                                           mkt.PUBLISH_IMMEDIATE)
        if self.addon.status == mkt.STATUS_APPROVED:
            # Special case if app is currently private.
            limited = True
            publish = mkt.PUBLISH_HIDDEN
        # Determine the current selection via STATUS to publish choice mapping.
        self.fields['publish_type'].initial = publish
        self.fields['limited'].initial = limited
        # Make the limited label safe so we can display the HTML.
        self.fields['limited'].label = mark_safe(self.fields['limited'].label)
    def save(self):
        publish = self.cleaned_data['publish_type']
        limited = self.cleaned_data['limited']
        # "Unlisted" + "limit to my team" collapses to private.
        if publish == mkt.PUBLISH_HIDDEN and limited:
            publish = mkt.PUBLISH_PRIVATE
        status = self.STATUS_MAPPING[publish]
        self.addon.update(status=status)
        mkt.log(mkt.LOG.CHANGE_STATUS, self.addon.get_status_display(),
                self.addon)
        # Call update_version, so various other bits of data update.
        self.addon.update_version()
        # Call to update names and locales if changed.
        self.addon.update_name_from_package_manifest()
        self.addon.update_supported_locales()
        set_storefront_data.delay(self.addon.pk)
class RegionForm(forms.Form):
    # Form controlling which regions an app is listed in, including the
    # "special" regions (e.g. China) that have their own approval flow.
    regions = forms.MultipleChoiceField(
        required=False, choices=[], widget=forms.CheckboxSelectMultiple,
        label=_lazy(u'Choose the regions your app will be listed in:'),
        error_messages={'required':
                        _lazy(u'You must select at least one region.')})
    special_regions = forms.MultipleChoiceField(
        required=False, widget=forms.CheckboxSelectMultiple,
        choices=[(x.id, x.name) for x in mkt.regions.SPECIAL_REGIONS])
    enable_new_regions = forms.BooleanField(
        required=False, label=_lazy(u'Enable new regions'))
    restricted = forms.TypedChoiceField(
        required=False, initial=0, coerce=int,
        choices=[(0, _lazy('Make my app available in most regions')),
                 (1, _lazy('Choose where my app is made available'))],
        widget=forms.RadioSelect(attrs={'class': 'choices'}))
    def __init__(self, *args, **kw):
        self.product = kw.pop('product', None)
        self.request = kw.pop('request', None)
        super(RegionForm, self).__init__(*args, **kw)
        self.fields['regions'].choices = REGIONS_CHOICES_SORTED_BY_NAME()
        # This is the list of the user's exclusions as we don't
        # want the user's choices to be altered by external
        # exclusions e.g. payments availability.
        user_exclusions = list(
            self.product.addonexcludedregion.values_list('region', flat=True)
        )
        # If we have excluded regions, uncheck those.
        # Otherwise, default to everything checked.
        self.regions_before = self.product.get_region_ids(
            restofworld=True,
            excluded=user_exclusions
        )
        self.initial = {
            'regions': sorted(self.regions_before),
            'restricted': int(self.product.geodata.restricted),
            'enable_new_regions': self.product.enable_new_regions,
        }
        # The checkboxes for special regions are
        #
        # - checked ... if an app has not been requested for approval in
        #   China or the app has been rejected in China.
        #
        # - unchecked ... if an app has been requested for approval in
        #   China or the app has been approved in China.
        unchecked_statuses = (mkt.STATUS_NULL, mkt.STATUS_REJECTED)
        for region in self.special_region_objs:
            if self.product.geodata.get_status(region) in unchecked_statuses:
                # If it's rejected in this region, uncheck its checkbox.
                if region.id in self.initial['regions']:
                    self.initial['regions'].remove(region.id)
            elif region.id not in self.initial['regions']:
                # If it's pending/public, check its checkbox.
                self.initial['regions'].append(region.id)
    @property
    def regions_by_id(self):
        return mkt.regions.REGIONS_CHOICES_ID_DICT
    @property
    def special_region_objs(self):
        return mkt.regions.SPECIAL_REGIONS
    @property
    def special_region_ids(self):
        return mkt.regions.SPECIAL_REGION_IDS
    @property
    def low_memory_regions(self):
        return any(region.low_memory for region in self.regions_by_id.values())
    @property
    def special_region_statuses(self):
        """Returns the null/pending/public status for each region."""
        statuses = {}
        for region in self.special_region_objs:
            statuses[region.id] = self.product.geodata.get_status_slug(region)
        return statuses
    @property
    def special_region_messages(self):
        """Returns the L10n messages for each region's status."""
        return self.product.geodata.get_status_messages()
    def is_toggling(self):
        # Returns 'free'/'paid' when the POST is a paid-status toggle
        # (regions are then left untouched), otherwise False.
        if not self.request or not hasattr(self.request, 'POST'):
            return False
        value = self.request.POST.get('toggle-paid')
        return value if value in ('free', 'paid') else False
    def _product_is_paid(self):
        return (self.product.premium_type in mkt.ADDON_PREMIUMS or
                self.product.premium_type == mkt.ADDON_FREE_INAPP)
    def clean_regions(self):
        regions = self.cleaned_data['regions']
        if not self.is_toggling():
            if not regions:
                raise forms.ValidationError(
                    _('You must select at least one region.'))
        return regions
    def save(self):
        # Don't save regions if we are toggling.
        if self.is_toggling():
            return
        regions = [int(x) for x in self.cleaned_data['regions']]
        special_regions = [
            int(x) for x in self.cleaned_data['special_regions']
        ]
        restricted = int(self.cleaned_data['restricted'] or 0)
        if restricted:
            # Diff previous vs. submitted region sets and sync the
            # exclusion rows accordingly.
            before = set(self.regions_before)
            after = set(regions)
            log.info(u'[Webapp:%s] App marked as restricted.' % self.product)
            # Add new region exclusions.
            to_add = before - after
            for region in to_add:
                aer, created = self.product.addonexcludedregion.get_or_create(
                    region=region)
                if created:
                    log.info(u'[Webapp:%s] Excluded from new region (%s).'
                             % (self.product, region))
            # Remove old region exclusions.
            to_remove = after - before
            for region in to_remove:
                self.product.addonexcludedregion.filter(
                    region=region).delete()
                log.info(u'[Webapp:%s] No longer excluded from region (%s).'
                         % (self.product, region))
            # If restricted, check how we should handle new regions.
            if self.cleaned_data['enable_new_regions']:
                self.product.update(enable_new_regions=True)
                log.info(u'[Webapp:%s] will be added to future regions.'
                         % self.product)
            else:
                self.product.update(enable_new_regions=False)
                log.info(u'[Webapp:%s] will not be added to future regions.'
                         % self.product)
        else:
            # If not restricted, set `enable_new_regions` to True and remove
            # currently excluded regions.
            self.product.update(enable_new_regions=True)
            self.product.addonexcludedregion.all().delete()
            log.info(u'[Webapp:%s] App marked as unrestricted.' % self.product)
        self.product.geodata.update(restricted=restricted)
        # Toggle region exclusions/statuses for special regions (e.g., China).
        toggle_app_for_special_regions(self.request, self.product,
                                       special_regions)
class CategoryForm(happyforms.Form):
    """Form for choosing the categories an app is listed under."""
    categories = forms.MultipleChoiceField(label=_lazy(u'Categories'),
                                           choices=CATEGORY_CHOICES,
                                           widget=forms.CheckboxSelectMultiple)

    def __init__(self, *args, **kw):
        self.request = kw.pop('request', None)
        self.product = kw.pop('product', None)
        super(CategoryForm, self).__init__(*args, **kw)
        current = self.product.categories
        self.cats_before = list(current) if current else []
        self.initial['categories'] = self.cats_before

    def max_categories(self):
        return mkt.MAX_CATEGORIES

    def clean_categories(self):
        chosen = self.cleaned_data['categories']
        limit = mkt.MAX_CATEGORIES
        # Count distinct categories only.
        if len(set(chosen)) > limit:
            # L10n: {0} is the number of categories.
            raise forms.ValidationError(ngettext(
                'You can have only {0} category.',
                'You can have only {0} categories.',
                limit).format(limit))
        return chosen

    def save(self):
        self.product.update(categories=list(self.cleaned_data['categories']))
        toggle_app_for_special_regions(self.request, self.product)
class DevAgreementForm(happyforms.Form):
    # Records the developer's acceptance of the developer agreement on the
    # instance passed in (timestamped at save time).
    read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree'),
                                            widget=forms.HiddenInput)
    def __init__(self, *args, **kw):
        self.instance = kw.pop('instance')
        super(DevAgreementForm, self).__init__(*args, **kw)
    def save(self):
        self.instance.read_dev_agreement = datetime.now()
        self.instance.save()
class DevNewsletterForm(happyforms.Form):
    """Devhub newsletter subscription form."""
    email = forms.EmailField(
        error_messages={'required':
                        _lazy(u'Please enter a valid email address.')},
        widget=forms.TextInput(attrs={'required': '',
                                      'placeholder':
                                      _lazy(u'Your email address')}))
    email_format = forms.ChoiceField(
        widget=forms.RadioSelect(),
        choices=(('H', 'HTML'), ('T', _lazy(u'Text'))),
        initial='H')
    privacy = forms.BooleanField(
        error_messages={'required':
                        _lazy(u'You must agree to the Privacy Policy.')})
    country = forms.ChoiceField(label=_lazy(u'Country'))
    def __init__(self, locale, *args, **kw):
        # Country choices depend on the request locale, so they are built
        # per-instance rather than on the class.
        regions = mpconstants_regions.get_region(locale).REGIONS
        # Sort by display name. (`iteritems` is Python-2-only.)
        regions = sorted(regions.iteritems(), key=lambda x: x[1])
        super(DevNewsletterForm, self).__init__(*args, **kw)
        self.fields['country'].choices = regions
        self.fields['country'].initial = 'us'
class AppFormTechnical(AddonFormBase):
    # Technical details form: flash usage is stored on the version's file,
    # not on the Webapp model, hence the extra handling below.
    flash = forms.BooleanField(required=False)
    is_offline = forms.BooleanField(required=False)
    class Meta:
        model = Webapp
        fields = ('is_offline', 'public_stats',)
    def __init__(self, *args, **kw):
        super(AppFormTechnical, self).__init__(*args, **kw)
        if self.version.all_files:
            self.initial['flash'] = self.version.all_files[0].uses_flash
    def save(self, addon, commit=False):
        # NOTE(review): the `commit` argument is ignored -- the super call
        # always commits. Confirm callers do not rely on commit=False.
        uses_flash = self.cleaned_data.get('flash')
        self.instance = super(AppFormTechnical, self).save(commit=True)
        if self.version.all_files:
            self.version.all_files[0].update(uses_flash=bool(uses_flash))
        return self.instance
class TransactionFilterForm(happyforms.Form):
    # Filters for the transaction lookup page.
    app = AddonChoiceField(queryset=None, required=False, label=_lazy(u'App'))
    transaction_type = forms.ChoiceField(
        required=False, label=_lazy(u'Transaction Type'),
        choices=[(None, '')] + mkt.MKT_TRANSACTION_CONTRIB_TYPES.items())
    transaction_id = forms.CharField(
        required=False, label=_lazy(u'Transaction ID'))
    # Evaluated once at import time (class-body code): years from 2013 up
    # to the year the process started.
    current_year = datetime.today().year
    years = [current_year - x for x in range(current_year - 2012)]
    date_from = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'From'))
    date_to = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'To'))
    def __init__(self, *args, **kwargs):
        # The app queryset is restricted to the caller-supplied apps.
        self.apps = kwargs.pop('apps', [])
        super(TransactionFilterForm, self).__init__(*args, **kwargs)
        self.fields['app'].queryset = self.apps
class APIConsumerForm(happyforms.ModelForm):
    # OAuth consumer registration: app name and redirect URI are only
    # mandatory for the 'website' (3-legged) flow.
    app_name = forms.CharField(required=False)
    oauth_leg = forms.ChoiceField(choices=(
        ('website', _lazy('Web site')),
        ('command', _lazy('Command line')))
    )
    redirect_uri = forms.CharField(validators=[URLValidator()], required=False)
    class Meta:
        model = Access
        fields = ('app_name', 'redirect_uri')
    def __init__(self, *args, **kwargs):
        super(APIConsumerForm, self).__init__(*args, **kwargs)
        # Tighten requirements based on the submitted leg choice.
        if self.data.get('oauth_leg') == 'website':
            for field in ['app_name', 'redirect_uri']:
                self.fields[field].required = True
class AppVersionForm(happyforms.ModelForm):
    # Per-version edit form: release/approval notes plus the app-level
    # publish-on-approval toggle.
    releasenotes = TransField(widget=TransTextarea(), required=False)
    approvalnotes = forms.CharField(
        widget=TranslationTextarea(attrs={'rows': 4}), required=False)
    publish_immediately = forms.BooleanField(
        required=False,
        label=_lazy(u'Make this the Active version of my app as soon as it '
                    u'has been reviewed and approved.'))
    class Meta:
        model = Version
        fields = ('releasenotes', 'approvalnotes')
    def __init__(self, *args, **kwargs):
        super(AppVersionForm, self).__init__(*args, **kwargs)
        self.fields['publish_immediately'].initial = (
            self.instance.addon.publish_type == mkt.PUBLISH_IMMEDIATE)
    def save(self, *args, **kwargs):
        rval = super(AppVersionForm, self).save(*args, **kwargs)
        if self.instance.all_files[0].status == mkt.STATUS_PENDING:
            # If version is pending, allow changes to publish_type.
            if self.cleaned_data.get('publish_immediately'):
                publish_type = mkt.PUBLISH_IMMEDIATE
            else:
                publish_type = mkt.PUBLISH_PRIVATE
            self.instance.addon.update(publish_type=publish_type)
        return rval
class PreloadTestPlanForm(happyforms.Form):
    """Form for submitting an app's pre-load candidacy test plan (PDF/XLS)."""
    agree = forms.BooleanField(
        widget=forms.CheckboxInput,
        label=_lazy(
            u'Please consider my app as a candidate to be pre-loaded on a '
            u'Firefox OS device. I agree to the terms and conditions outlined '
            u'above. I understand that this document is not a commitment to '
            u'pre-load my app.'
        ))
    test_plan = forms.FileField(
        label=_lazy(u'Upload Your Test Plan (.pdf, .xls under 2.5MB)'),
        widget=forms.FileInput(attrs={'class': 'button'}))

    def clean(self):
        """Validate test_plan file."""
        content_types = [
            'application/pdf',
            'application/vnd.pdf',
            'application/ms-excel',
            'application/vnd.ms-excel',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.'
            'sheet'
        ]
        max_upload_size = 2621440  # 2.5MB

        if 'test_plan' not in self.files:
            raise forms.ValidationError(_('Test plan required.'))

        # Renamed from `file` so the builtin is not shadowed.
        plan = self.files['test_plan']
        # Content type is guessed from the filename extension only; the
        # file contents are not inspected.
        content_type = mimetypes.guess_type(plan.name)[0]

        if content_type in content_types:
            # NOTE(review): `_size` is a private attribute of the uploaded
            # file object; the public `.size` should be equivalent.
            if plan._size > max_upload_size:
                msg = _('File too large. Keep size under %s. Current size %s.')
                msg = msg % (filesizeformat(max_upload_size),
                             filesizeformat(plan._size))
                self._errors['test_plan'] = self.error_class([msg])
                raise forms.ValidationError(msg)
        else:
            msg = (_('Invalid file type {0}. Only {1} files are supported.')
                   .format(content_type, ', '.join(content_types)))
            self._errors['test_plan'] = self.error_class([msg])
            raise forms.ValidationError(msg)

        return self.cleaned_data
class IARCGetAppInfoForm(happyforms.Form):
    # Form attaching an existing IARC rating certificate to an app by
    # submission id + security code.
    submission_id = forms.CharField()
    security_code = forms.CharField(max_length=10)
    def __init__(self, app, *args, **kwargs):
        self.app = app
        super(IARCGetAppInfoForm, self).__init__(*args, **kwargs)
    def clean_submission_id(self):
        # Normalized to an int for storage and lookup.
        submission_id = (
            # Also allow "subm-1234" since that's what IARC tool displays.
            self.cleaned_data['submission_id'].lower().replace('subm-', ''))
        if submission_id.isdigit():
            return int(submission_id)
        raise forms.ValidationError(_('Please enter a valid submission ID.'))
    def clean(self):
        cleaned_data = super(IARCGetAppInfoForm, self).clean()
        app = self.app
        iarc_id = cleaned_data.get('submission_id')
        if not app or not iarc_id:
            return cleaned_data
        # Reject certificates already attached to a different app, unless
        # reuse is explicitly allowed by settings.
        if (not settings.IARC_ALLOW_CERT_REUSE and
                IARCInfo.objects.filter(submission_id=iarc_id)
                .exclude(addon=app).exists()):
            del cleaned_data['submission_id']
            raise forms.ValidationError(
                _('This IARC certificate is already being used for another '
                  'app. Please create a new IARC Ratings Certificate.'))
        return cleaned_data
    def save(self, *args, **kwargs):
        app = self.app
        iarc_id = self.cleaned_data['submission_id']
        iarc_code = self.cleaned_data['security_code']
        if settings.DEBUG and iarc_id == 0:
            # A local developer is being lazy. Skip the hard work.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors([])
            app.set_interactives([])
            app.set_content_ratings({ratingsbodies.ESRB: ratingsbodies.ESRB_E})
            return
        # Generate XML.
        xml = lib.iarc.utils.render_xml(
            'get_app_info.xml',
            {'submission_id': iarc_id, 'security_code': iarc_code})
        # Process that shizzle.
        client = lib.iarc.client.get_iarc_client('services')
        resp = client.Get_App_Info(XMLString=xml)
        # Handle response.
        data = lib.iarc.utils.IARC_XML_Parser().parse_string(resp)
        if data.get('rows'):
            row = data['rows'][0]
            if 'submission_id' not in row:
                # [{'ActionStatus': 'No records found. Please try another
                # 'criteria.', 'rowId: 1}].
                msg = _('Invalid submission ID or security code.')
                self._errors['submission_id'] = self.error_class([msg])
                log.info('[IARC] Bad GetAppInfo: %s' % row)
                raise forms.ValidationError(msg)
            # We found a rating, so store the id and code for future use.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors(row.get('descriptors', []))
            app.set_interactives(row.get('interactives', []))
            app.set_content_ratings(row.get('ratings', {}))
        else:
            msg = _('Invalid submission ID or security code.')
            self._errors['submission_id'] = self.error_class([msg])
            log.info('[IARC] Bad GetAppInfo. No rows: %s' % data)
            raise forms.ValidationError(msg)
class ContentRatingForm(happyforms.Form):
    # Filter form: only content ratings modified since this datetime.
    since = forms.DateTimeField()
class MOTDForm(happyforms.Form):
    # Free-text "message of the day" editor.
    motd = forms.CharField(widget=widgets.Textarea())
| |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: mimetypes.py
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=1) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib
try:
import _winreg
except ImportError:
_winreg = None
# Public API of this module.
__all__ = [
    'guess_type', 'guess_extension', 'guess_all_extensions',
    'add_type', 'read_mime_types', 'init']
# Candidate mime.types files for init() to parse (per the module
# docstring above). The duplicate '/usr/local/etc/httpd/conf/mime.types'
# entry matches the upstream stdlib list.
knownfiles = [
    '/etc/mime.types',
    '/etc/httpd/mime.types',
    '/etc/httpd/conf/mime.types',
    '/etc/apache/mime.types',
    '/etc/apache2/mime.types',
    '/usr/local/etc/httpd/conf/mime.types',
    '/usr/local/lib/netscape/mime.types',
    '/usr/local/etc/httpd/conf/mime.types',
    '/usr/local/etc/mime.types']
# Module-level state: `inited` flags that init() has run (see docstring);
# `_db` holds the default MimeTypes instance once built.
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
    def __init__(self, filenames=(), strict=True):
        # Ensure the module-level default maps exist before copying them.
        # `encodings_map`, `suffix_map`, `types_map` and `common_types`
        # are module globals built by init() (defined outside this chunk).
        global inited
        if not inited:
            init()
        self.encodings_map = encodings_map.copy()
        self.suffix_map = suffix_map.copy()
        # Pair of (non-strict, strict) dicts indexed by the boolean
        # `strict` flag: [True] holds standard types, [False] the
        # commonly-found non-standard ones.
        self.types_map = ({}, {})
        self.types_map_inv = ({}, {})
        for ext, type in types_map.items():
            self.add_type(type, ext, True)
        for ext, type in common_types.items():
            self.add_type(type, ext, False)
        for name in filenames:
            self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
scheme, url = urllib.splittype(url)
if scheme == 'data':
comma = url.find(',')
if comma < 0:
return (None, None)
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return (type, None)
else:
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return (types_map[ext], encoding)
if ext.lower() in types_map:
return (types_map[ext.lower()], encoding)
if strict:
return (None, encoding)
types_map = self.types_map[False]
if ext in types_map:
return (types_map[ext], encoding)
if ext.lower() in types_map:
return (types_map[ext.lower()], encoding)
return (
None, encoding)
return
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
else:
return extensions[0]
    def read(self, filename, strict=True):
        """
        Read a single mime.types-format file, specified by pathname.

        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        # Parsing is delegated to readfp(); the context manager
        # guarantees the file is closed even if parsing raises.
        with open(filename) as fp:
            self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
    def read_windows_registry(self, strict=True):
        """
        Load the MIME types database from Windows registry.

        If strict is true, information will be added to
        list of standard types, else to the list of non-standard
        types.
        """
        # _winreg is only importable on Windows; on other platforms the
        # module-level name is falsy and this method is a silent no-op.
        if not _winreg:
            return
        def enum_types(mimedb):
            # Yield every content-type subkey name under `mimedb` that
            # can be encoded in the default encoding; names that cannot
            # be encoded are skipped (but enumeration continues).
            i = 0
            while True:
                try:
                    ctype = _winreg.EnumKey(mimedb, i)
                except EnvironmentError:
                    # No more subkeys.
                    break
                try:
                    ctype = ctype.encode(default_encoding)
                except UnicodeEncodeError:
                    pass
                else:
                    yield ctype
                i += 1
        default_encoding = sys.getdefaultencoding()
        with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, 'MIME\\Database\\Content Type') as mimedb:
            for ctype in enum_types(mimedb):
                try:
                    with _winreg.OpenKey(mimedb, ctype) as key:
                        suffix, datatype = _winreg.QueryValueEx(key, 'Extension')
                except EnvironmentError:
                    # Key vanished or has no Extension value; skip it.
                    continue
                # Only plain REG_SZ string values name a usable extension.
                if datatype != _winreg.REG_SZ:
                    continue
                try:
                    suffix = suffix.encode(default_encoding)
                except UnicodeEncodeError:
                    # Unencodable extension; skip this content type.
                    continue
                self.add_type(ctype, suffix, strict)
def guess_type(url, strict=True):
    """Guess the type of a file based on its URL.

    Returns a (type, encoding) tuple.  `type` is None when the suffix is
    unknown, otherwise a 'type/subtype' string usable for a MIME
    Content-type header.  `encoding` is None or the name of the program
    used to encode the file (e.g. compress or gzip).  The mappings are
    table driven: encoding suffixes are case sensitive, type suffixes
    are tried case sensitively first, then case insensitively.

    The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped to
    ".tar.gz" via the suffix_map table.

    Optional `strict' argument when false adds a bunch of commonly
    found, but non-standard types.
    """
    global _db
    if _db is None:
        # Lazily build the default database on first use.
        init()
    return _db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
    """Guess all filename extensions for a file based on its MIME type.

    Returns a list of extension strings, each including the leading dot
    ('.').  An extension is not guaranteed to have been associated with
    any particular data stream, but guess_type() would map it to `type'.

    Optional `strict' argument when false adds a bunch of commonly
    found, but non-standard types.
    """
    if _db is None:
        # Lazily build the default database on first use.
        init()
    return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
    """Guess a single filename extension for a file based on its MIME type.

    Returns an extension string including the leading dot ('.'), or
    None when no extension can be guessed for `type'.  The extension is
    not guaranteed to have been associated with any particular data
    stream, but guess_type() would map it back to `type'.

    Optional `strict' argument when false adds a bunch of commonly
    found, but non-standard types.
    """
    if _db is None:
        # Lazily build the default database on first use.
        init()
    return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
    """Add a mapping between a MIME type and an extension.

    An already-known extension has its type replaced; an already-known
    type gets the extension appended to its list of extensions.

    If strict is true, information will be added to the list of
    standard types, else to the list of non-standard types.
    """
    if _db is None:
        # Lazily build the default database on first use.
        init()
    return _db.add_type(type, ext, strict)
def init(files=None):
    """(Re)build the module-level MIME database and rebind the module
    tables (types_map, encodings_map, suffix_map, common_types) to it.

    When `files` is None, the Windows registry (if available) and the
    well-known mime.types files are consulted; otherwise only the given
    files are read.
    """
    global encodings_map, suffix_map, types_map, common_types
    global inited, _db
    inited = True
    mimedb = MimeTypes()
    if files is None:
        if _winreg:
            mimedb.read_windows_registry()
        files = knownfiles
    for file in files:
        if os.path.isfile(file):
            mimedb.read(file)
    # Expose the freshly built tables through the legacy module globals.
    encodings_map = mimedb.encodings_map
    suffix_map = mimedb.suffix_map
    types_map = mimedb.types_map[True]
    common_types = mimedb.types_map[False]
    _db = mimedb
def read_mime_types(file):
    """Parse a mime.types-style file and return its strict type table.

    Returns a dict mapping extensions (with leading dot) to MIME type
    strings, or None when the file cannot be opened.
    """
    try:
        f = open(file)
    except IOError:
        return None
    try:
        db = MimeTypes()
        db.readfp(f, True)
        return db.types_map[True]
    finally:
        # The original implementation leaked the file object; always
        # close it, whether parsing succeeds or raises.
        f.close()
def _default_mime_types():
    """Populate the module tables with built-in default mappings.

    Called once at import time so that the module is usable even when
    no mime.types file can be read.  init() later rebinds the same
    globals to the tables of a freshly built database.
    """
    global types_map
    global encodings_map
    global common_types
    global suffix_map
    # Compound suffixes rewritten before lookup (e.g. '.tgz' -> '.tar.gz').
    suffix_map = {
        '.tgz': '.tar.gz',
        '.taz': '.tar.gz',
        '.tz': '.tar.gz',
        '.tbz2': '.tar.bz2',
        }
    # Suffixes naming an encoding program rather than a content type.
    encodings_map = {
        '.gz': 'gzip',
        '.Z': 'compress',
        '.bz2': 'bzip2',
        }
    # Standard, IANA-style type mappings.  The original table contained
    # duplicate '.cdf' and '.xls' keys whose first entries were dead
    # (the later literal silently won); only the effective entries are
    # kept here.
    types_map = {
        '.a': 'application/octet-stream',
        '.ai': 'application/postscript',
        '.aif': 'audio/x-aiff',
        '.aifc': 'audio/x-aiff',
        '.aiff': 'audio/x-aiff',
        '.au': 'audio/basic',
        '.avi': 'video/x-msvideo',
        '.bat': 'text/plain',
        '.bcpio': 'application/x-bcpio',
        '.bin': 'application/octet-stream',
        '.bmp': 'image/x-ms-bmp',
        '.c': 'text/plain',
        '.cdf': 'application/x-netcdf',
        '.cpio': 'application/x-cpio',
        '.csh': 'application/x-csh',
        '.css': 'text/css',
        '.dll': 'application/octet-stream',
        '.doc': 'application/msword',
        '.dot': 'application/msword',
        '.dvi': 'application/x-dvi',
        '.eml': 'message/rfc822',
        '.eps': 'application/postscript',
        '.etx': 'text/x-setext',
        '.exe': 'application/octet-stream',
        '.gif': 'image/gif',
        '.gtar': 'application/x-gtar',
        '.h': 'text/plain',
        '.hdf': 'application/x-hdf',
        '.htm': 'text/html',
        '.html': 'text/html',
        '.ief': 'image/ief',
        '.jpe': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.jpg': 'image/jpeg',
        '.js': 'application/x-javascript',
        '.ksh': 'text/plain',
        '.latex': 'application/x-latex',
        '.m1v': 'video/mpeg',
        '.man': 'application/x-troff-man',
        '.me': 'application/x-troff-me',
        '.mht': 'message/rfc822',
        '.mhtml': 'message/rfc822',
        '.mif': 'application/x-mif',
        '.mov': 'video/quicktime',
        '.movie': 'video/x-sgi-movie',
        '.mp2': 'audio/mpeg',
        '.mp3': 'audio/mpeg',
        '.mp4': 'video/mp4',
        '.mpa': 'video/mpeg',
        '.mpe': 'video/mpeg',
        '.mpeg': 'video/mpeg',
        '.mpg': 'video/mpeg',
        '.ms': 'application/x-troff-ms',
        '.nc': 'application/x-netcdf',
        '.nws': 'message/rfc822',
        '.o': 'application/octet-stream',
        '.obj': 'application/octet-stream',
        '.oda': 'application/oda',
        '.p12': 'application/x-pkcs12',
        '.p7c': 'application/pkcs7-mime',
        '.pbm': 'image/x-portable-bitmap',
        '.pdf': 'application/pdf',
        '.pfx': 'application/x-pkcs12',
        '.pgm': 'image/x-portable-graymap',
        '.pl': 'text/plain',
        '.png': 'image/png',
        '.pnm': 'image/x-portable-anymap',
        '.pot': 'application/vnd.ms-powerpoint',
        '.ppa': 'application/vnd.ms-powerpoint',
        '.ppm': 'image/x-portable-pixmap',
        '.pps': 'application/vnd.ms-powerpoint',
        '.ppt': 'application/vnd.ms-powerpoint',
        '.ps': 'application/postscript',
        '.pwz': 'application/vnd.ms-powerpoint',
        '.py': 'text/x-python',
        '.pyc': 'application/x-python-code',
        '.pyo': 'application/x-python-code',
        '.qt': 'video/quicktime',
        '.ra': 'audio/x-pn-realaudio',
        '.ram': 'application/x-pn-realaudio',
        '.ras': 'image/x-cmu-raster',
        '.rdf': 'application/xml',
        '.rgb': 'image/x-rgb',
        '.roff': 'application/x-troff',
        '.rtx': 'text/richtext',
        '.sgm': 'text/x-sgml',
        '.sgml': 'text/x-sgml',
        '.sh': 'application/x-sh',
        '.shar': 'application/x-shar',
        '.snd': 'audio/basic',
        '.so': 'application/octet-stream',
        '.src': 'application/x-wais-source',
        '.sv4cpio': 'application/x-sv4cpio',
        '.sv4crc': 'application/x-sv4crc',
        '.swf': 'application/x-shockwave-flash',
        '.t': 'application/x-troff',
        '.tar': 'application/x-tar',
        '.tcl': 'application/x-tcl',
        '.tex': 'application/x-tex',
        '.texi': 'application/x-texinfo',
        '.texinfo': 'application/x-texinfo',
        '.tif': 'image/tiff',
        '.tiff': 'image/tiff',
        '.tr': 'application/x-troff',
        '.tsv': 'text/tab-separated-values',
        '.txt': 'text/plain',
        '.ustar': 'application/x-ustar',
        '.vcf': 'text/x-vcard',
        '.wav': 'audio/x-wav',
        '.wiz': 'application/msword',
        '.wsdl': 'application/xml',
        '.xbm': 'image/x-xbitmap',
        '.xlb': 'application/vnd.ms-excel',
        '.xls': 'application/vnd.ms-excel',
        '.xml': 'text/xml',
        '.xpdl': 'application/xml',
        '.xpm': 'image/x-xpixmap',
        '.xsl': 'application/xml',
        '.xwd': 'image/x-xwindowdump',
        '.zip': 'application/zip',
        }
    # Non-standard but commonly found types, used only when strict=False.
    common_types = {
        '.jpg': 'image/jpg',
        '.mid': 'audio/midi',
        '.midi': 'audio/midi',
        '.pct': 'image/pict',
        '.pic': 'image/pict',
        '.pict': 'image/pict',
        '.rtf': 'application/rtf',
        '.xul': 'text/xul',
        }
_default_mime_types()
if __name__ == '__main__':
    # Minimal command-line front end: guess the MIME type (or, with -e,
    # the extension) for each argument.  NOTE: this is Python 2 code
    # (print statements).
    import getopt
    USAGE = 'Usage: mimetypes.py [options] type\n\nOptions:\n --help / -h -- print this message and exit\n --lenient / -l -- additionally search of some common, but non-standard\n types.\n --extension / -e -- guess extension instead of type\n\nMore than one type argument may be given.\n'
    def usage(code, msg=''):
        # Print the usage text (plus an optional error message) and exit
        # with the given status code.
        print USAGE
        if msg:
            print msg
        sys.exit(code)
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hle', ['help',
                                                         'lenient',
                                                         'extension'])
    except getopt.error as msg:
        usage(1, msg)
    # Defaults: consult only the strict table, and guess the type.
    strict = 1
    extension = 0
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-l', '--lenient'):
            strict = 0
        elif opt in ('-e', '--extension'):
            extension = 1
    for gtype in args:
        if extension:
            guess = guess_extension(gtype, strict)
            if not guess:
                print "I don't know anything about type",
                print gtype
            else:
                print guess
        else:
            guess, encoding = guess_type(gtype, strict)
            if not guess:
                print "I don't know anything about type", gtype
            else:
                print 'type:',
                print guess, 'encoding:', encoding
| |
# This file is part of beets.
# Copyright 2013, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A Web interface to beets."""
from beets.plugins import BeetsPlugin
from beets import ui
from beets import util
import beets.library
import flask
from flask import g
from werkzeug.routing import BaseConverter, PathConverter
import os
import json
# Utilities.
def _rep(obj, expand=False):
    """Get a flat -- i.e., JSON-ish -- representation of a beets Item or
    Album object. For Albums, `expand` dictates whether tracks are
    included.
    """
    out = dict(obj)
    if isinstance(obj, beets.library.Item):
        out.pop('path')
        # Get the size (in bytes) of the backing file. This is useful
        # for the Tomahawk resolver API.
        try:
            out['size'] = os.path.getsize(util.syspath(obj.path))
        except OSError:
            out['size'] = 0
        return out
    if isinstance(obj, beets.library.Album):
        out.pop('artpath')
        if expand:
            out['items'] = [_rep(track) for track in obj.items()]
        return out
def json_generator(items, root):
    """Generator that dumps list of beets Items or Albums as JSON

    :param root: root key for JSON
    :param items: list of :class:`Item` or :class:`Album` to dump
    :returns: generator that yields strings
    """
    yield '{"%s":[' % root
    for index, item in enumerate(items):
        # Separate elements with commas; the first one gets none.
        if index:
            yield ','
        yield json.dumps(_rep(item))
    yield ']}'
def resource(name):
    """Decorates a function to handle RESTful HTTP requests for a resource.
    """
    def make_responder(retriever):
        def responder(ids):
            # Look every id up and drop the misses.
            found = [entity for entity in map(retriever, ids) if entity]
            if len(found) == 1:
                return flask.jsonify(_rep(found[0]))
            if found:
                return app.response_class(
                    json_generator(found, root=name),
                    mimetype='application/json'
                )
            return flask.abort(404)
        responder.__name__ = 'get_%s' % name
        return responder
    return make_responder
def resource_query(name):
    """Decorates a function to handle RESTful HTTP queries for resources.
    """
    def make_responder(query_func):
        def responder(queries):
            payload = json_generator(query_func(queries), root='results')
            return app.response_class(payload, mimetype='application/json')
        responder.__name__ = 'query_%s' % name
        return responder
    return make_responder
def resource_list(name):
    """Decorates a function to handle RESTful HTTP request for a list of
    resources.
    """
    def make_responder(list_all):
        def responder():
            payload = json_generator(list_all(), root=name)
            return app.response_class(payload, mimetype='application/json')
        responder.__name__ = 'all_%s' % name
        return responder
    return make_responder
class IdListConverter(BaseConverter):
    """Converts comma separated lists of ids in urls to integer lists.
    """
    def to_python(self, value):
        """Parse e.g. '1,2,x,3' into [1, 2, 3], dropping non-integers."""
        ids = []
        for id in value.split(','):
            try:
                ids.append(int(id))
            except ValueError:
                pass
        return ids

    def to_url(self, value):
        # The python-side value is a list of ints (see to_python); they
        # must be stringified first -- ','.join() on ints raises
        # TypeError, which broke URL building with this converter.
        return ','.join(str(v) for v in value)
class QueryConverter(PathConverter):
    """Converts slash separated lists of queries in the url to string list.
    """
    def to_python(self, value):
        return value.split('/')

    def to_url(self, value):
        # Join with '/' so that to_url round-trips with to_python.  The
        # original joined with ',', producing URLs that to_python would
        # not split back into the same list.
        return '/'.join(value)
# Flask setup.
app = flask.Flask(__name__)
# Register the custom URL converters so routes can use the
# <idlist:...> and <query:...> placeholders.
app.url_map.converters['idlist'] = IdListConverter
app.url_map.converters['query'] = QueryConverter
@app.before_request
def before_request():
    # Expose the beets Library (stored in app.config by WebPlugin) as
    # flask.g.lib for the duration of each request.
    g.lib = app.config['lib']
# Items.
@app.route('/item/<idlist:ids>')
@resource('items')
def get_item(id):
    """Retrieve one library item by id; resource() maps the id list
    from the URL onto repeated calls of this retriever.
    """
    return g.lib.get_item(id)
@app.route('/item/')
@app.route('/item/query/')
@resource_list('items')
def all_items():
    """Return every item in the library as a JSON list."""
    return g.lib.items()
@app.route('/item/<int:item_id>/file')
def item_file(item_id):
    """Serve the item's underlying audio file as a download."""
    item = g.lib.get_item(item_id)
    response = flask.send_file(item.path, as_attachment=True,
                               attachment_filename=os.path.basename(item.path))
    # Supply the length explicitly so clients can show download progress.
    response.headers['Content-Length'] = os.path.getsize(item.path)
    return response
@app.route('/item/query/<query:queries>')
@resource_query('items')
def item_query(queries):
    """Return the items matching a slash-separated beets query."""
    return g.lib.items(queries)
# Albums.
@app.route('/album/<idlist:ids>')
@resource('albums')
def get_album(id):
    """Retrieve one album by id; resource() maps the id list from the
    URL onto repeated calls of this retriever.
    """
    return g.lib.get_album(id)
@app.route('/album/')
@app.route('/album/query/')
@resource_list('albums')
def all_albums():
    """Return every album in the library as a JSON list."""
    return g.lib.albums()
@app.route('/album/query/<query:queries>')
@resource_query('albums')
def album_query(queries):
    """Return the albums matching a slash-separated beets query."""
    return g.lib.albums(queries)
@app.route('/album/<int:album_id>/art')
def album_art(album_id):
    """Serve the album's cover-art file."""
    album = g.lib.get_album(album_id)
    # NOTE(review): no guard for a missing album or a None artpath --
    # presumably this raises/500s rather than returning 404; confirm.
    return flask.send_file(album.artpath)
# Artists.
@app.route('/artist/')
def all_artists():
    """Return the distinct album-artist names as JSON."""
    with g.lib.transaction() as tx:
        rows = tx.query("SELECT DISTINCT albumartist FROM albums")
    all_artists = [row[0] for row in rows]
    return flask.jsonify(artist_names=all_artists)
# Library information.
@app.route('/stats')
def stats():
    """Return item and album counts for the whole library."""
    with g.lib.transaction() as tx:
        item_rows = tx.query("SELECT COUNT(*) FROM items")
        album_rows = tx.query("SELECT COUNT(*) FROM albums")
    return flask.jsonify({
        'items': item_rows[0][0],
        'albums': album_rows[0][0],
    })
# UI.
@app.route('/')
def home():
    """Render the single-page web UI."""
    return flask.render_template('index.html')
# Plugin hook.
class WebPlugin(BeetsPlugin):
    """Beets plugin that serves the library over HTTP via Flask."""
    def __init__(self):
        super(WebPlugin, self).__init__()
        # Default configuration: host '' and port 8337.
        self.config.add({
            'host': u'',
            'port': 8337,
        })
    def commands(self):
        """Expose the `beet web` subcommand."""
        cmd = ui.Subcommand('web', help='start a Web interface')
        cmd.parser.add_option('-d', '--debug', action='store_true',
                              default=False, help='debug mode')
        def func(lib, opts, args):
            args = ui.decargs(args)
            # Optional positional arguments override host and port.
            if args:
                self.config['host'] = args.pop(0)
            if args:
                self.config['port'] = int(args.pop(0))
            # Hand the Library to the Flask app and start serving.
            app.config['lib'] = lib
            app.run(host=self.config['host'].get(unicode),
                    port=self.config['port'].get(int),
                    debug=opts.debug, threaded=True)
        cmd.func = func
        return [cmd]
| |
import numpy as np
import re
import hashing
import conf
# Word-embedding lookup table (SENNA) and its vocabulary size.
embedding_dict = conf.senna_dict
emb_vocab = conf.senna_vocab
# Word-hashing tables (trigram and bigram variants) for the chunking task.
chunk_hash_dict = conf.chunk_hash_dict
chunk_hash_dict_2 = conf.chunk_hash_dict_2
chunk_hash_vocab = conf.chunk_hash_vocab
# Word-hashing tables (trigram and bigram variants) for the NER task.
ner_hash_dict = conf.ner_hash_dict
ner_hash_dict_2 = conf.ner_hash_dict_2
ner_hash_vocab = conf.ner_hash_vocab
# Fixed sequence (padding) lengths per task.
chunk_step_length = conf.chunk_step_length
ner_step_length = conf.ner_step_length
# Tag -> integer encodings for labels and auxiliary inputs.
chunk_ALL_IOB = conf.chunk_ALL_IOB_encode
chunk_NP_IOB = conf.chunk_NP_IOB_encode
chunk_POS = conf.chunk_POS_encode
ner_POS = conf.ner_POS_encode
ner_chunk = conf.ner_chunk_encode
ner_IOB = conf.ner_IOB_encode
ner_BIOES = conf.ner_BIOES_encode
# Widths of the hand-crafted feature vectors.
additional_length = conf.additional_length
gazetteer_length = conf.gazetteer_length
BIOES_gazetteer_length = conf.BIOES_gazetteer_length
def prepare_auto_encoder(batch, task='chunk', gram='tri'):
    """Build the word-hashing matrix for `batch` via hashing.sen2matrix.

    `task` selects the task-specific hashing setup and `gram` the n-gram
    size ('tri' or 'bi').
    """
    word_hashing = hashing.sen2matrix(batch, task, gram)
    return word_hashing
def prepare_chunk(batch, trigram=False, gram='tri', chunk_type='NP', step_length=chunk_step_length):
    """Convert a batch of chunking examples into padded index arrays.

    Each element of `batch` is (words, pos_tags, iob_tags).  Returns
    (embedding_index, hash_index, pos, label, sentence_length,
    sentences); label/embedding/hash rows are zero-padded to
    `step_length`, while pos is left unpadded as before.

    Raises ValueError for an unknown `gram` (previously this surfaced
    as an obscure NameError from an unbound local).
    """
    if gram not in ('tri', 'bi'):
        raise ValueError("gram must be 'tri' or 'bi', got %r" % (gram,))
    IOB = chunk_ALL_IOB if chunk_type == 'ALL' else chunk_NP_IOB
    hash_dict = chunk_hash_dict if gram == 'tri' else chunk_hash_dict_2
    embedding_index = []
    hash_index = []
    pos = []
    label = []
    sentence_length = []
    sentences = []
    for sentence in batch:
        # Words and their POS tags.
        sequence = list(sentence[0])
        sequence_pos = list(sentence[1])
        if trigram:
            # Add start/end markers so letter trigrams see word boundaries.
            sequence.insert(0, '#')
            sequence.append('#')
            sequence_pos.insert(0, '#')
            sequence_pos.append('#')
        _embedding_index = [embedding_dict.get(w.strip().lower(), emb_vocab + 1)
                            for w in sequence]
        _hash_index = [hash_dict.get(w.strip().lower(), chunk_hash_vocab + 1)
                       for w in sequence]
        sentences.append(sentence[0])
        _pos = [chunk_POS[p] for p in sequence_pos]
        _label = [IOB[t] for t in sentence[2]]
        length = len(_label)
        # Zero-pad to the fixed step length.
        _label.extend([0] * (step_length - length))
        _embedding_index.extend([0] * (step_length - length))
        _hash_index.extend([0] * (step_length - length))
        embedding_index.append(_embedding_index)
        hash_index.append(_hash_index)
        pos.append(_pos)
        label.append(_label)
        # Record the true sentence length for accuracy computation.
        sentence_length.append(length)
    return (np.array(embedding_index), np.array(hash_index), np.array(pos),
            np.array(label), np.array(sentence_length), sentences)
# raw hashing
# raw hashing
def prepare_chunk_raw(batch, trigram=False, gram='tri', chunk_type='NP', step_length=chunk_step_length):
    """Like prepare_chunk, but returns the raw word-hashing matrix from
    hashing.sen2matrix instead of hash-table indices.
    """
    if chunk_type == 'ALL':
        IOB = chunk_ALL_IOB
    else:
        IOB = chunk_NP_IOB
    embedding_index = []
    hash_representation = []
    pos = []
    label = []
    sentence_length = []
    sentences = []
    for sentence in batch:
        # Words and their POS tags.
        words = list(sentence[0])
        pos_tags = list(sentence[1])
        if trigram:
            # Start/end markers for the trigram hashing.
            words.insert(0, '#')
            words.append('#')
            pos_tags.insert(0, '#')
            pos_tags.append('#')
        emb = [embedding_dict.get(w.strip().lower(), emb_vocab + 1) for w in words]
        hashed = hashing.sen2matrix(words, task="chunk", gram=gram)
        sentences.append(sentence[0])
        pos_codes = [chunk_POS[t] for t in pos_tags]
        iob_codes = [IOB[t] for t in sentence[2]]
        n = len(iob_codes)
        # Zero-pad label and embedding rows to the fixed step length.
        iob_codes.extend([0] * (step_length - n))
        emb.extend([0] * (step_length - n))
        embedding_index.append(emb)
        hash_representation.append(hashed)
        pos.append(pos_codes)
        label.append(iob_codes)
        # True length, for accuracy computation over padded batches.
        sentence_length.append(n)
    return (np.array(embedding_index), np.array(hash_representation),
            np.array(pos), np.array(label), np.array(sentence_length),
            sentences)
# raw hashing
# raw hashing
def prepare_ner_raw(batch, trigram=False, gram='tri', form='BIO', step_length=ner_step_length):
    """Like prepare_ner, but returns the raw word-hashing matrix from
    hashing.sen2matrix instead of hash-table indices.

    Each element of `batch` is (words, pos_tags, chunk_tags, ner_tags).

    Raises ValueError for an unknown `form` (previously this surfaced
    as an obscure NameError from an unbound local).
    """
    if form not in ('BIO', 'BIOES'):
        raise ValueError("form must be 'BIO' or 'BIOES', got %r" % (form,))
    label_encode = ner_IOB if form == 'BIO' else ner_BIOES
    embedding_index = []
    hash_representation = []
    pos = []
    chunk = []
    label = []
    sentence_length = []
    sentences = []
    for sentence in batch:
        # Words, POS tags and chunk tags.
        sequence = list(sentence[0])
        sequence_pos = list(sentence[1])
        sequence_chunk = list(sentence[2])
        if trigram:
            # Add start/end markers for the trigram hashing.
            # NOTE(review): unlike prepare_ner, sequence_chunk gets no
            # '-X-' placeholders here, so _chunk is two entries shorter
            # than _pos when trigram=True -- confirm this is intended.
            sequence.insert(0, '#')
            sequence.append('#')
            sequence_pos.insert(0, '#')
            sequence_pos.append('#')
        _embedding_index = [embedding_dict.get(w.strip().lower(), emb_vocab + 1)
                            for w in sequence]
        _hash_representation = hashing.sen2matrix(sequence, task="ner", gram=gram)
        sentences.append(sentence[0])
        _pos = [ner_POS[p] for p in sequence_pos]
        _chunk = [ner_chunk[c] for c in sequence_chunk]
        _label = [label_encode[t] for t in sentence[3]]
        length = len(_label)
        # Zero-pad label and embedding rows to the fixed step length.
        _label.extend([0] * (step_length - length))
        _embedding_index.extend([0] * (step_length - length))
        embedding_index.append(_embedding_index)
        hash_representation.append(_hash_representation)
        pos.append(_pos)
        chunk.append(_chunk)
        label.append(_label)
        # True length, for accuracy computation over padded batches.
        sentence_length.append(length)
    return (np.array(embedding_index), np.array(hash_representation),
            np.array(pos), np.array(chunk), np.array(label),
            np.array(sentence_length), sentences)
def prepare_ner(batch, form='BIO', trigram=False, gram='tri', step_length=ner_step_length):
    """Convert a batch of NER examples into padded index arrays.

    Each element of `batch` is (words, pos_tags, chunk_tags, ner_tags).
    Returns (embedding_index, hash_index, pos, chunk, label,
    sentence_length, sentences); label/embedding/hash rows are
    zero-padded to `step_length`, pos/chunk are left unpadded.

    Raises ValueError for an unknown `gram` or `form` (previously these
    surfaced as obscure NameErrors from unbound locals).
    """
    if gram not in ('tri', 'bi'):
        raise ValueError("gram must be 'tri' or 'bi', got %r" % (gram,))
    if form not in ('BIO', 'BIOES'):
        raise ValueError("form must be 'BIO' or 'BIOES', got %r" % (form,))
    hash_dict = ner_hash_dict if gram == 'tri' else ner_hash_dict_2
    label_encode = ner_IOB if form == 'BIO' else ner_BIOES
    embedding_index = []
    hash_index = []
    pos = []
    chunk = []
    label = []
    sentence_length = []
    sentences = []
    for sentence in batch:
        # Words, POS tags and chunk tags.
        sequence = list(sentence[0])
        sequence_pos = list(sentence[1])
        sequence_chunk = list(sentence[2])
        if trigram:
            # Add start/end markers (with matching chunk placeholders).
            sequence.insert(0, '#')
            sequence.append('#')
            sequence_pos.insert(0, '#')
            sequence_pos.append('#')
            sequence_chunk.insert(0, '-X-')
            sequence_chunk.append('-X-')
        _embedding_index = [embedding_dict.get(w.strip().lower(), emb_vocab + 1)
                            for w in sequence]
        _hash_index = [hash_dict.get(w.strip().lower(), ner_hash_vocab + 1)
                       for w in sequence]
        sentences.append(sentence[0])
        _pos = [ner_POS[p] for p in sequence_pos]
        _chunk = [ner_chunk[c] for c in sequence_chunk]
        _label = [label_encode[t] for t in sentence[3]]
        length = len(_label)
        # Zero-pad to the fixed step length.
        _label.extend([0] * (step_length - length))
        _embedding_index.extend([0] * (step_length - length))
        _hash_index.extend([0] * (step_length - length))
        embedding_index.append(_embedding_index)
        hash_index.append(_hash_index)
        pos.append(_pos)
        chunk.append(_chunk)
        label.append(_label)
        # True length, for accuracy computation over padded batches.
        sentence_length.append(length)
    return (np.array(embedding_index), np.array(hash_index), np.array(pos),
            np.array(chunk), np.array(label), np.array(sentence_length),
            sentences)
def prepare_gazetteer(batch, gazetteer='senna'):
    """Build per-token gazetteer indicator features.

    For every sentence a (length, gazetteer_length) matrix is produced;
    column order is LOC=0, ORG=1, PER=2, MISC=3.  Unigram membership is
    recorded per token; then n-grams for n = 2..5 are looked up and, on
    a hit, overwrite the rows they cover.  Longer n-grams are processed
    later and therefore take precedence, matching the pass order of the
    original copy-pasted loops.

    Raises ValueError for an unknown `gazetteer` source (previously an
    obscure NameError).
    """
    if gazetteer == 'senna':
        LOC, PER, ORG, MISC = conf.LOC, conf.PER, conf.ORG, conf.MISC
    elif gazetteer == 'conll':
        LOC, PER, ORG, MISC = (conf.LOC_conll, conf.PER_conll,
                               conf.ORG_conll, conf.MISC_conll)
    else:
        raise ValueError("unknown gazetteer source: %r" % (gazetteer,))
    # (column, lexicon) pairs in the order the original code tested them.
    lexicons = ((0, LOC), (1, ORG), (2, PER), (3, MISC))
    gazetteer_feature = []
    sentence_length = []
    for sentence in batch:
        sequence = [each.strip().lower() for each in list(sentence[0])]
        length = len(sequence)
        sentence_length.append(length)
        _gazetteer_feature = np.zeros((length, gazetteer_length))
        # Unigrams: mark each token's own memberships.
        for i, word in enumerate(sequence):
            for col, lexicon in lexicons:
                if word in lexicon:
                    _gazetteer_feature[i, col] = 1
        # n-grams (n = 2..5): on any hit, overwrite the covered rows.
        for n in range(2, 6):
            for i in range(length - n + 1):
                phrase = " ".join(sequence[i:i + n])
                rows = np.zeros((n, gazetteer_length))
                hit = False
                for col, lexicon in lexicons:
                    if phrase in lexicon:
                        rows[:, col] = 1
                        hit = True
                if hit:
                    _gazetteer_feature[i:i + n] = rows
        gazetteer_feature.append(_gazetteer_feature)
    return np.array(gazetteer_feature), np.array(sentence_length)
def prepare_gazetteer_BIOES(batch, gazetteer='senna'):
    """Build BIOES-style gazetteer features.

    Columns come in blocks of four per entity type -- LOC: 0-3,
    ORG: 4-7, PER: 8-11, MISC: 12-15 -- where within a block the
    offsets mean B(egin)=0, I(nside)=1, E(nd)=2, S(ingle)=3.

    Unigram hits set the S column for each token.  Then, for n = 2..5,
    n-gram hits write B/I/E over the rows they cover and skip past the
    match (greedy, non-overlapping within one n) -- exactly what the
    original cascade of copy-pasted while-loops did.

    Raises ValueError for an unknown `gazetteer` source (previously an
    obscure NameError).
    """
    if gazetteer == 'senna':
        LOC, PER, ORG, MISC = conf.LOC, conf.PER, conf.ORG, conf.MISC
    elif gazetteer == 'conll':
        LOC, PER, ORG, MISC = (conf.LOC_conll, conf.PER_conll,
                               conf.ORG_conll, conf.MISC_conll)
    else:
        raise ValueError("unknown gazetteer source: %r" % (gazetteer,))
    # (column-block base, lexicon) pairs in the original test order.
    lexicons = ((0, LOC), (4, ORG), (8, PER), (12, MISC))
    gazetteer_feature = []
    sentence_length = []
    for sentence in batch:
        sequence = [each.strip().lower() for each in list(sentence[0])]
        length = len(sequence)
        sentence_length.append(length)
        _gazetteer_feature = np.zeros((length, BIOES_gazetteer_length))
        # Unigrams: S(ingle) column (base + 3) for each token.
        for i, word in enumerate(sequence):
            for base, lexicon in lexicons:
                if word in lexicon:
                    _gazetteer_feature[i, base + 3] = 1
        # n-grams (n = 2..5): B/I/E over the covered rows, skipping past
        # each match so matches of the same length cannot overlap.
        for n in range(2, 6):
            i = 0
            while i <= length - n:
                phrase = " ".join(sequence[i:i + n])
                rows = np.zeros((n, BIOES_gazetteer_length))
                hit = False
                for base, lexicon in lexicons:
                    if phrase in lexicon:
                        rows[0, base] = 1            # B(egin)
                        rows[1:n - 1, base + 1] = 1  # I(nside); empty for n == 2
                        rows[n - 1, base + 2] = 1    # E(nd)
                        hit = True
                if hit:
                    _gazetteer_feature[i:i + n] = rows
                    i = i + n
                else:
                    i = i + 1
        gazetteer_feature.append(_gazetteer_feature)
    return np.array(gazetteer_feature), np.array(sentence_length)
def prepare_additional(batch, task='chunk'):
    """Build per-token orthographic ("spelling") feature vectors.

    Feature columns: 0=all uppercase, 1=all lowercase, 2=all digits,
    3=only non-alphanumeric characters, 4=the exact token "'s",
    5=anything else; 6/7/8 describe the first character (see notes in
    the body).  Returns (features, sentence_length).
    """
    # NOTE(review): step_length is computed here but never used.
    if task=='chunk':
        step_length = chunk_step_length
    elif task=='ner':
        step_length = ner_step_length
    # Matches tokens made only of non-alphanumeric characters.
    special_case = re.compile(r'^[^a-zA-Z0-9]*$')
    # Matches tokens made only of lowercase ASCII letters.
    lower_case = re.compile(r'^[a-z]*$')
    additional_feature = []
    sentence_length = []
    for sentence in batch:
        # sentence and sentence pos
        sequence = list(sentence[0])
        length = len(sequence)
        sentence_length.append(length)
        spelling_feature = np.zeros((length, additional_length))
        for i, word in enumerate(sequence):
            word = word.strip()
            spelling = np.zeros(additional_length)
            # is all letter is uppercase, digit or other
            # all uppercase
            if word.isupper():
                spelling[0] = 1
            # all lowercase
            elif re.match(lower_case, word):
                spelling[1] = 1
            # all digit
            elif word.isdigit():
                spelling[2] = 1
            # contain special character
            elif re.match(special_case, word):
                spelling[3] = 1
            # end with 's
            # NOTE(review): despite the comment, this tests equality
            # with "'s", not an ends-with condition -- confirm intent.
            elif word=="'s":
                spelling[4] = 1
            else:
                spelling[5] = 1
            # NOTE(review): word[0] raises IndexError if a token strips
            # to the empty string -- confirm tokens are never blank.
            first_ele = word[0]
            # start with alpha
            if first_ele.isalpha():
                # start with upper
                if first_ele.isupper():
                    spelling[6] = 1
                # start with digit
                # NOTE(review): this elif is attached to the isupper()
                # branch, so an alphabetic first character can never be
                # a digit -- spelling[7] looks unreachable as written.
                elif first_ele.isdigit():
                    spelling[7] = 1
                else:
                    spelling[8] = 1
            spelling_feature[i] = spelling
        additional_feature.append(spelling_feature)
    return np.array(additional_feature), np.array(sentence_length)
def gazetteer_lookup(batch, chunktag, data, gazetteer='senna'):
    """Overwrite ``chunktag`` entries with gazetteer-derived entity tags.

    The token sequence ``batch`` is lowercased and all 1- to 5-gram phrases
    are looked up in the selected gazetteer sets.  Matching spans overwrite
    the corresponding ``chunktag`` positions; longer n-grams are processed
    after shorter ones and therefore take precedence.  Within one phrase the
    label precedence is LOC, ORG, PER, MISC (first match wins), matching the
    original elif chains.

    arg: batch: sequence of word strings
    arg: chunktag: mutable tag list, modified in place
    arg: data: ``"test"`` tags spans as I-X (unigram) / B-X, I-X... (n-gram);
        ``"dev"`` uses the BIOES scheme (S-X / B-X, I-X..., E-X).
        Any other value leaves ``chunktag`` untouched.
    arg: gazetteer: ``'senna'`` or ``'conll'``; selects which lexicon sets
        from ``conf`` are used
    return: chunktag (the same, mutated, list)
    raise: ValueError - ``gazetteer`` is neither 'senna' nor 'conll'
        (previously this fell through to a NameError at lookup time)

    Note: a leftover debug ``print(word)`` in the original dev branch has
    been removed; the massive per-n-gram-size duplication is collapsed into
    one parameterized loop with identical overwrite order.
    """
    if gazetteer == 'senna':
        lexicons = [("LOC", conf.LOC), ("ORG", conf.ORG),
                    ("PER", conf.PER), ("MISC", conf.MISC)]
    elif gazetteer == 'conll':
        lexicons = [("LOC", conf.LOC_conll), ("ORG", conf.ORG_conll),
                    ("PER", conf.PER_conll), ("MISC", conf.MISC_conll)]
    else:
        raise ValueError("unknown gazetteer: %r" % (gazetteer,))
    if data not in ("test", "dev"):
        # Original behaviour: no tagging for other dataset names.
        return chunktag
    sequence = [each.strip().lower() for each in batch]
    length = len(sequence)

    def tag_span(start, n, label):
        # Write the tags for an n-token match beginning at ``start``.
        if data == "test":
            # IOB-style output used by the original test branch.
            if n == 1:
                chunktag[start] = "I-" + label
            else:
                chunktag[start] = "B-" + label
                for k in range(1, n):
                    chunktag[start + k] = "I-" + label
        else:
            # BIOES output used by the original dev branch.
            if n == 1:
                chunktag[start] = "S-" + label
            else:
                chunktag[start] = "B-" + label
                for k in range(1, n - 1):
                    chunktag[start + k] = "I-" + label
                chunktag[start + n - 1] = "E-" + label

    # Shorter n-grams first, so longer matches overwrite them — exactly the
    # original sequence of loops (unigram, then 2..5-grams).
    for n in range(1, 6):
        for i in range(length - n + 1):
            phrase = " ".join(sequence[i:i + n])
            for label, lexicon in lexicons:
                if phrase in lexicon:
                    tag_span(i, n, label)
                    break
    return chunktag
| |
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:19744")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:19744")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "encryptwallet":
try:
pwd = getpass.getpass(prompt="Enter passphrase: ")
pwd2 = getpass.getpass(prompt="Repeat passphrase: ")
if pwd == pwd2:
access.encryptwallet(pwd)
print "\n---Wallet encrypted. Server stopping, restart to run with encrypted wallet---\n"
else:
print "\n---Passphrases do not match---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Dotcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Dotcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
"""Mongodb implementations of learning searches."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package mongo package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from .. import utilities
from ...abstract_osid.learning import searches as abc_learning_searches
from ...abstract_osid.osid import errors
from dlkit.mongo.osid import searches as osid_searches
class ObjectiveSearch(abc_learning_searches.ObjectiveSearch, osid_searches.OsidSearch):
    """``ObjectiveSearch`` defines the interface for specifying objective search options."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    @utilities.arguments_not_none
    def search_among_objectives(self, objective_ids):
        """Execute this search among the given list of objectives.
        arg: objective_ids (osid.id.IdList): list of objectives
        raise: NullArgument - ``objective_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def order_objective_results(self, objective_search_order):
        """Specify an ordering to the search results.
        arg: objective_search_order
            (osid.learning.ObjectiveSearchOrder): objective search
            order
        raise: NullArgument - ``objective_search_order`` is ``null``
        raise: Unsupported - ``objective_search_order`` is not of this
            service
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_objective_search_record(self, objective_search_record_type):
        """Gets the objective search record corresponding to the given objective search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: objective_search_record_type (osid.type.Type): an
            objective search record type
        return: (osid.learning.records.ObjectiveSearchRecord) - the
            objective search record
        raise: NullArgument - ``objective_search_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
            ``has_search_record_type(objective_search_record_type)``
            is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class ObjectiveSearchResults(abc_learning_searches.ObjectiveSearchResults, osid_searches.OsidSearchResults):
    """This interface provides a means to capture results of a search."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    def get_objectives(self):
        """Gets the objective list resulting from the search.
        return: (osid.learning.ObjectiveList) - the objective list
        raise: IllegalState - list already retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    objectives = property(fget=get_objectives)
    def get_objective_query_inspector(self):
        """Gets the inspector for the query to examine the terms used in the search.
        return: (osid.learning.ObjectiveQueryInspector) - the query
            inspector
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    objective_query_inspector = property(fget=get_objective_query_inspector)
    @utilities.arguments_not_none
    def get_objective_search_results_record(self, objective_search_record_type):
        """Gets the objective search results record corresponding to the given objective search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: objective_search_record_type (osid.type.Type): an
            objective search record type
        return: (osid.learning.records.ObjectiveSearchResultsRecord) -
            the objective search results record
        raise: NullArgument - ``objective_search_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
            ``has_search_record_type(objective_search_record_type)``
            is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class ActivitySearch(abc_learning_searches.ActivitySearch, osid_searches.OsidSearch):
    """``ActivitySearch`` defines the interface for specifying activity search options."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    @utilities.arguments_not_none
    def search_among_activities(self, activity_ids):
        """Execute this search among the given list of activities.
        arg: activity_ids (osid.id.IdList): list of activities
        raise: NullArgument - ``activity_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def order_activity_results(self, activitiesearch_order):
        """Specify an ordering to the search results.
        arg: activitiesearch_order
            (osid.learning.ActivitySearchOrder): activity search
            order
        raise: NullArgument - ``activitiesearch_order`` is ``null``
        raise: Unsupported - ``activitiesearch_order`` is not of this
            service
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_activity_search_record(self, activitiesearch_record_type):
        """Gets the activity record corresponding to the given activity search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: activitiesearch_record_type (osid.type.Type): an
            activity search record type
        return: (osid.learning.records.ActivitySearchRecord) - the
            activity search record
        raise: NullArgument - ``activitiesearch_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
            ``has_search_record_type(activitiesearch_record_type)``
            is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class ActivitySearchResults(abc_learning_searches.ActivitySearchResults, osid_searches.OsidSearchResults):
    """This interface provides a means to capture results of a search."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    def get_activities(self):
        """Gets the activity list resulting from the search.
        return: (osid.learning.ActivityList) - the activity list
        raise: IllegalState - list already retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    activities = property(fget=get_activities)
    def get_activity_query_inspector(self):
        """Gets the inspector for the query to examine the terms used in the search.
        return: (osid.learning.ActivityQueryInspector) - the query
            inspector
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    activity_query_inspector = property(fget=get_activity_query_inspector)
    @utilities.arguments_not_none
    def get_activity_search_results_record(self, activitiesearch_record_type):
        """Gets the activity search results record corresponding to the given activity search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: activitiesearch_record_type (osid.type.Type): an
            activity search record type
        return: (osid.learning.records.ActivitySearchResultsRecord) -
            the activity search results record
        raise: NullArgument - ``activitiesearch_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
            ``has_search_record_type(activitiesearch_record_type)``
            is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class ProficiencySearch(abc_learning_searches.ProficiencySearch, osid_searches.OsidSearch):
    """The search interface for governing proficiency searches."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    @utilities.arguments_not_none
    def search_among_proficiencies(self, proficiency_ids):
        """Execute this search among the given list of proficiencies.
        arg: proficiency_ids (osid.id.IdList): list of proficiencies
        raise: NullArgument - ``proficiency_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def order_proficiency_results(self, proficiency_search_order):
        """Specify an ordering to the search results.
        arg: proficiency_search_order
            (osid.learning.ProficiencySearchOrder): proficiency
            search order
        raise: NullArgument - ``proficiency_search_order`` is ``null``
        raise: Unsupported - ``proficiency_search_order`` is not of
            this service
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_proficiency_search_record(self, proficiency_search_record_type):
        """Gets the proficiency search record corresponding to the given proficiency search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: proficiency_search_record_type (osid.type.Type): a
            proficiency search record type
        return: (osid.learning.records.ProficiencySearchRecord) - the
            proficiency search record
        raise: NullArgument - ``proficiency_search_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
            ``has_record_type(proficiency_search_record_type)`` is
            ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class ProficiencySearchResults(abc_learning_searches.ProficiencySearchResults, osid_searches.OsidSearchResults):
    """This interface provides a means to capture results of a search."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    def get_proficiencies(self):
        """Gets the proficiency list resulting from a search.
        return: (osid.learning.ProficiencyList) - the proficiency list
        raise: IllegalState - list already retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    proficiencies = property(fget=get_proficiencies)
    def get_proficiency_query_inspector(self):
        """Gets the inspector for the query to examine the terms used in the search.
        return: (osid.learning.ProficiencyQueryInspector) - the
            proficiency query inspector
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    proficiency_query_inspector = property(fget=get_proficiency_query_inspector)
    @utilities.arguments_not_none
    def get_proficiency_search_results_record(self, proficiency_search_record_type):
        """Gets the proficiency search results record corresponding to the given proficiency search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: proficiency_search_record_type (osid.type.Type): a
            proficiency search record type
        return: (osid.learning.records.ProficiencySearchResultsRecord) -
            the proficiency search results record
        raise: NullArgument - ``proficiency_search_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported -
            ``has_record_type(proficiency_search_record_type)`` is
            ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class ObjectiveBankSearch(abc_learning_searches.ObjectiveBankSearch, osid_searches.OsidSearch):
    """The interface for governing objective bank searches."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    @utilities.arguments_not_none
    def search_among_objective_banks(self, objective_bank_ids):
        """Execute this search among the given list of objective banks.
        arg: objective_bank_ids (osid.id.IdList): list of objective
            banks
        raise: NullArgument - ``objective bank_ids`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def order_objective_bank_results(self, objective_bank_search_order):
        """Specify an ordering to the search results.
        arg: objective_bank_search_order
            (osid.learning.ObjectiveBankSearchOrder): objective bank
            search order
        raise: NullArgument - ``objective_bank_search_order`` is
            ``null``
        raise: Unsupported - ``objective_bank_search_order`` is not of
            this service
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    @utilities.arguments_not_none
    def get_objective_bank_search_record(self, objective_bank_search_record_type):
        """Gets the objective bank search record corresponding to the given objective bank search record ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: objective_bank_search_record_type (osid.type.Type): an
            objective bank search record type
        raise: NullArgument - ``objective_bank_search_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported - ``has_search_record_type(objective
            bank_search_record_type)`` is ``false``
        return: (osid.learning.records.ObjectiveBankSearchRecord) - the
            objective bank search record
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
class ObjectiveBankSearchResults(abc_learning_searches.ObjectiveBankSearchResults, osid_searches.OsidSearchResults):
    """This interface provides a means to capture results of a search."""
    # Specification stub: every method raises errors.Unimplemented (osid errors module).
    def get_objective_banks(self):
        """Gets the objective bank list resulting from the search.
        return: (osid.learning.ObjectiveBankList) - the objective bank
            list
        raise: IllegalState - list already retrieved
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    objective_banks = property(fget=get_objective_banks)
    def get_objective_bank_query_inspector(self):
        """Gets the inspector for the query to examine the terms used in the search.
        return: (osid.learning.ObjectiveBankQueryInspector) - the query
            inspector
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
    objective_bank_query_inspector = property(fget=get_objective_bank_query_inspector)
    @utilities.arguments_not_none
    def get_objective_bank_search_results_record(self, objective_bank_search_record_type):
        """Gets the objective bank search results record corresponding to the given objective bank search record
        ``Type``.
        This method is used to retrieve an object implementing the
        requested record.
        arg: objective_bank_search_record_type (osid.type.Type): an
            objective bank search record type
        return: (osid.learning.records.ObjectiveBankSearchResultsRecord)
            - the objective bank search results record
        raise: NullArgument - ``objective_bank_search_record_type`` is
            ``null``
        raise: OperationFailed - unable to complete request
        raise: Unsupported - ``has_search_record_type(objective
            bank_search_record_type)`` is ``false``
        *compliance: mandatory -- This method must be implemented.*
        """
        raise errors.Unimplemented()
| |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - anywikidraw
This action is used to call anywikidraw (http://sourceforge.net/projects/anywikidraw/)
@copyright: 2001 by Ken Sugino (sugino@mediaone.net),
2001-2004 by Juergen Hermann <jh@web.de>,
2005 MoinMoin:AlexanderSchremmer,
2005 DiegoOngaro at ETSZONE (diego@etszone.com),
2007-2008 MoinMoin:ThomasWaldmann,
2005-2009 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import os, re
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin import config, wikiutil
from MoinMoin.action import AttachFile, do_show
from MoinMoin.action.AttachFile import _write_stream
action_name = __name__.split('.')[-1]
from MoinMoin.action.twikidraw import gedit_drawing
def attachment_drawing(self, url, text, **kw):
    """Return HTML for a clickable drawing image (used by the text_html formatter).

    url -- attachment URL naming the drawing (may include a page name)
    text -- unused (see XXX below)
    kw -- extra attributes forwarded to the image formatter
    """
    # This is called for displaying a clickable drawing image by text_html formatter.
    # XXX text arg is unused!
    _ = self.request.getText
    pagename, drawing = AttachFile.absoluteName(url, self.page.page_name)
    containername = wikiutil.taintfilename(drawing)
    # the link target opens the drawing in edit ('modify') mode
    drawing_url = AttachFile.getAttachUrl(pagename, containername, self.request, do='modify')
    ci = AttachFile.ContainerItem(self.request, pagename, containername)
    if not ci.exists():
        # drawing container does not exist yet: render a "create new drawing" link
        title = _('Create new drawing "%(filename)s (opens in new window)"') % {'filename': self.text(containername)}
        img = self.icon('attachimg') # TODO: we need a new "drawimg" in similar grey style and size
        css = 'nonexistent'
        return self.url(1, drawing_url, css=css, title=title) + img + self.url(0)
    title = _('Edit drawing %(filename)s (opens in new window)') % {'filename': self.text(containername)}
    kw['src'] = src = ci.member_url('drawing.png')  # NOTE(review): local ``src`` is assigned but never used
    kw['css'] = 'drawing'
    try:
        # optional image map stored alongside the drawing in the container
        mapfile = ci.get('drawing.map')
        map = mapfile.read()
        mapfile.close()
        map = map.decode(config.charset)
    except (KeyError, IOError, OSError):
        # no usable map: fall back to a plain linked image below
        map = u''
    if map:
        # ToDo mapid must become uniq
        # we have a image map. inline it and add a map ref to the img tag
        # we have also to set a unique ID
        mapid = u'ImageMapOf%s%s' % (self.request.uid_generator(pagename), drawing)
        map = map.replace(u'id="%s.svg"' % drawing, '')
        map = map.replace(u'name="%s.svg"' % drawing, u'name="%s"' % mapid)
        # unxml, because 4.01 concrete will not validate />
        map = map.replace(u'/>', u'>')
        title = _('Clickable drawing: %(filename)s') % {'filename': self.text(containername)}
        if 'title' not in kw:
            kw['title'] = title
        if 'alt' not in kw:
            kw['alt'] = kw['title']
        kw['usemap'] = '#'+mapid
        return self.url(1, drawing_url) + map + self.image(**kw) + self.url(0)
    else:
        if 'title' not in kw:
            kw['title'] = title
        if 'alt' not in kw:
            kw['alt'] = kw['title']
        return self.url(1, drawing_url) + self.image(**kw) + self.url(0)
class AnyWikiDraw(object):
""" anywikidraw action """
    def __init__(self, request, pagename, target):
        """Bind the action to a request, a page name and the drawing attachment (target) name."""
        self.request = request    # current MoinMoin request object
        self.pagename = pagename  # page the drawing is attached to
        self.target = target      # attachment (container) name of the drawing
    def save(self):
        """Store one uploaded part of a drawing (.svg, .map or rendered image) as an attachment.

        Returns an error message string on failure; returns None after a
        successful (or silently skipped empty-content) store.
        """
        request = self.request
        _ = request.getText
        # reject requests without a valid one-time ticket (CSRF protection)
        if not wikiutil.checkTicket(request, request.args.get('ticket', '')):
            return _('Please use the interactive user interface to use action %(actionname)s!') % {'actionname': 'anywikidraw.save' }
        pagename = self.pagename
        target = self.target
        if not request.user.may.write(pagename):
            return _('You are not allowed to save a drawing on this page.')
        if not target:
            return _("Empty target name given.")
        file_upload = request.files.get('filepath')
        if not file_upload:
            # This might happen when trying to upload file names
            # with non-ascii characters on Safari.
            return _("No file content. Delete non ASCII characters from the file name and try again.")
        filename = request.form['filename']
        basepath, basename = os.path.split(filename)
        basename, ext = os.path.splitext(basename)
        ci = AttachFile.ContainerItem(request, pagename, target)
        filecontent = file_upload.stream
        content_length = None
        if ext == '.svg': # AnyWikiDraw POSTs this first
            # log the drawing save and reset the container before the first part
            AttachFile._addLogEntry(request, 'ATTDRW', pagename, target)
            ci.truncate()
            filecontent = filecontent.read() # read file completely into memory
            filecontent = filecontent.replace("\r", "")
        elif ext == '.map':
            # touch attachment directory to invalidate cache if new map is saved
            attach_dir = AttachFile.getAttachDir(request, pagename)
            os.utime(attach_dir, None)
            filecontent = filecontent.read() # read file completely into memory
            filecontent = filecontent.strip()
        else:
            #content_length = file_upload.content_length
            # XXX gives -1 for wsgiref :( If this is fixed, we could use the file obj,
            # without reading it into memory completely:
            filecontent = filecontent.read()
        if filecontent:
            # empty parts (e.g. an empty .map) are silently dropped
            ci.put('drawing' + ext, filecontent, content_length)
def render(self):
request = self.request
_ = request.getText
pagename = self.pagename
target = self.target
if not request.user.may.read(pagename):
return _('You are not allowed to view attachments of this page.')
if not target:
return _("Empty target name given.")
ci = AttachFile.ContainerItem(request, pagename, target)
if ci.exists():
drawurl = ci.member_url('drawing.svg')
else:
drawurl = ''
pageurl = request.href(pagename)
saveurl = request.href(pagename, action=action_name, do='save', target=target,
ticket=wikiutil.createTicket(request))
helpurl = request.href("HelpOnActions/AttachFile")
html = """
<p>
<applet code="org.anywikidraw.moinmoin.MoinMoinDrawingApplet.class" codebase="."
archive="%(htdocs)s/applets/anywikidraw/lib/AnyWikiDrawForMoinMoin.jar" width="800" height="620">
<!-- The following parameters are used to tell AnyWikiDraw how to communicate with MoinMoin. -->
<param name="DrawingName" value="%(basename)s.svg">
<param name="DrawingURL" value="%(drawurl)s">
<param name="PageURL" value="%(pageurl)s">
<param name="UploadURL" value="%(saveurl)s">
<!-- The following parameters are used to configure the drawing applet -->
<param name="Locale" value="en">
<!-- The following parameters are used to configure Sun's Java Plug-In -->
<param name="codebase_lookup" value="false">
<param name="classloader_cache" value="false">
<!-- The following makes trouble with FF3 on Ubuntu 9.04 as client and
Apache2 / mod_wsgi on Debian Lenny as server, it seems to confuse
.gz files with gzip content-encoding and fails miserably somehow:
param name="java_arguments" value="-Djnlp.packEnabled=true" -->
<param name="boxborder" value="false">
<param name="centerimage" value="true">
<strong>NOTE:</strong> You need a Java enabled browser to edit the drawing.
</applet>
</p>
""" % dict(
htdocs=request.cfg.url_prefix_static,
basename=wikiutil.escape(target, 1),
drawurl=wikiutil.escape(drawurl, 1),
pageurl=wikiutil.escape(pageurl, 1),
saveurl=wikiutil.escape(saveurl, 1),
)
title = '%s %s:%s' % (_('Edit drawing'), pagename, target)
request.theme.send_title(title, page=request.page, pagename=pagename)
request.write(request.formatter.startContent("content"))
request.write(request.formatter.rawHTML(html))
request.write(request.formatter.endContent())
request.theme.send_footer(pagename)
request.theme.send_closing_html()
def execute(pagename, request):
    """Entry point for the anywikidraw action.

    Dispatches on the 'do' request parameter: 'save' stores an uploaded
    drawing part, anything else renders the applet page.  Any returned
    message is shown to the user as an error on the page.
    """
    # Default to '' so wikiutil.taintfilename never receives None when the
    # request carries no 'target' parameter; save()/render() already reject
    # an empty target with a user-visible message.
    target = request.values.get('target', '')
    target = wikiutil.taintfilename(target)
    awd = AnyWikiDraw(request, pagename, target)
    do = request.values.get('do')
    if do == 'save':
        msg = awd.save()
    else:
        msg = awd.render()
    if msg:
        # escape: msg may contain user-supplied text (e.g. the file name)
        request.theme.add_msg(wikiutil.escape(msg), 'error')
    do_show(pagename, request)
| |
# Copyright 2016 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import json
import os
import tempfile
from datetime import datetime, timedelta
from azurelinuxagent.common.event import __event_logger__, add_log_event, MAX_NUMBER_OF_EVENTS, TELEMETRY_LOG_EVENT_ID,\
TELEMETRY_LOG_PROVIDER_ID, EVENTS_DIRECTORY
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.utils import fileutil
from tests.tools import AgentTestCase, MagicMock, patch, skip_if_predicate_true
# Message templates used by the logging tests below; the {0} {1} placeholders
# are filled with the values of _DATA when the logger formats the message.
_MSG_INFO = "This is our test info logging message {0} {1}"
_MSG_WARN = "This is our test warn logging message {0} {1}"
_MSG_ERROR = "This is our test error logging message {0} {1}"
_MSG_VERBOSE = "This is our test verbose logging message {0} {1}"
# Positional arguments forwarded to the logger for the templates above.
_DATA = ["arg1", "arg2"]
class TestLogger(AgentTestCase):
    """Tests for azurelinuxagent.common.logger: periodic message emission,
    timestamp formatting, appender wiring and the telemetry hand-off."""

    def setUp(self):
        AgentTestCase.setUp(self)
        self.lib_dir = tempfile.mkdtemp()
        self.event_dir = os.path.join(self.lib_dir, EVENTS_DIRECTORY)
        fileutil.mkdir(self.event_dir)
        self.log_file = tempfile.mkstemp(prefix="logfile-")[1]
        logger.reset_periodic()

    def tearDown(self):
        AgentTestCase.tearDown(self)
        logger.reset_periodic()
        # Clear in place so any code holding a reference to the appender
        # list sees it emptied (equivalent to the old `appenders *= 0`).
        del logger.DEFAULT_LOGGER.appenders[:]
        fileutil.rm_dirs(self.event_dir)

    @patch('azurelinuxagent.common.logger.Logger.verbose')
    @patch('azurelinuxagent.common.logger.Logger.warn')
    @patch('azurelinuxagent.common.logger.Logger.error')
    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_emits_if_not_previously_sent(self, mock_info, mock_error, mock_warn, mock_verbose):
        """A periodic message seen for the first time is emitted immediately."""
        # The periodic_* helpers take (delta, msg_format, *args); a stray
        # LogLevel that used to be passed here was silently forwarded as a
        # message argument (see test_periodic_forwards_message_and_args).
        logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA)
        self.assertEqual(1, mock_info.call_count)
        logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA)
        self.assertEqual(1, mock_error.call_count)
        logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA)
        self.assertEqual(1, mock_warn.call_count)
        logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA)
        self.assertEqual(1, mock_verbose.call_count)

    @patch('azurelinuxagent.common.logger.Logger.verbose')
    @patch('azurelinuxagent.common.logger.Logger.warn')
    @patch('azurelinuxagent.common.logger.Logger.error')
    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_does_not_emit_if_previously_sent(self, mock_info, mock_error, mock_warn, mock_verbose):
        """A periodic message already sent within its delta is suppressed."""
        # The count does not increase from 1 - the first time it sends the data.
        logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA)
        self.assertIn(hash(_MSG_INFO), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_info.call_count)
        logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA)
        self.assertIn(hash(_MSG_INFO), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_info.call_count)
        logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA)
        self.assertIn(hash(_MSG_WARN), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_warn.call_count)
        logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA)
        self.assertIn(hash(_MSG_WARN), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_warn.call_count)
        logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA)
        self.assertIn(hash(_MSG_ERROR), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_error.call_count)
        logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA)
        self.assertIn(hash(_MSG_ERROR), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_error.call_count)
        logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA)
        self.assertIn(hash(_MSG_VERBOSE), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_verbose.call_count)
        logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA)
        self.assertIn(hash(_MSG_VERBOSE), logger.DEFAULT_LOGGER.periodic_messages)
        self.assertEqual(1, mock_verbose.call_count)
        self.assertEqual(4, len(logger.DEFAULT_LOGGER.periodic_messages))

    @patch('azurelinuxagent.common.logger.Logger.verbose')
    @patch('azurelinuxagent.common.logger.Logger.warn')
    @patch('azurelinuxagent.common.logger.Logger.error')
    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_emits_after_elapsed_delta(self, mock_info, mock_error, mock_warn, mock_verbose):
        """A periodic message is re-emitted once its delta has elapsed."""
        logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA)
        self.assertEqual(1, mock_info.call_count)
        logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA)
        self.assertEqual(1, mock_info.call_count)
        # Back-date the last-sent timestamp past the delta to force re-emission.
        logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_INFO)] = datetime.now() - \
            logger.EVERY_DAY - logger.EVERY_HOUR
        logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA)
        self.assertEqual(2, mock_info.call_count)
        logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA)
        self.assertEqual(1, mock_warn.call_count)
        logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA)
        self.assertEqual(1, mock_warn.call_count)
        logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_WARN)] = datetime.now() - \
            logger.EVERY_DAY - logger.EVERY_HOUR
        logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA)
        # Fixed copy/paste bug: this previously asserted on mock_info.
        self.assertEqual(2, mock_warn.call_count)
        logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA)
        self.assertEqual(1, mock_error.call_count)
        logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA)
        self.assertEqual(1, mock_error.call_count)
        logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_ERROR)] = datetime.now() - \
            logger.EVERY_DAY - logger.EVERY_HOUR
        logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA)
        # Fixed copy/paste bug: this previously asserted on mock_info.
        self.assertEqual(2, mock_error.call_count)
        logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA)
        self.assertEqual(1, mock_verbose.call_count)
        logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA)
        self.assertEqual(1, mock_verbose.call_count)
        logger.DEFAULT_LOGGER.periodic_messages[hash(_MSG_VERBOSE)] = datetime.now() - \
            logger.EVERY_DAY - logger.EVERY_HOUR
        logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA)
        # Fixed copy/paste bug: this previously asserted on mock_info.
        self.assertEqual(2, mock_verbose.call_count)

    @patch('azurelinuxagent.common.logger.Logger.verbose')
    @patch('azurelinuxagent.common.logger.Logger.warn')
    @patch('azurelinuxagent.common.logger.Logger.error')
    @patch('azurelinuxagent.common.logger.Logger.info')
    def test_periodic_forwards_message_and_args(self, mock_info, mock_error, mock_warn, mock_verbose):
        """periodic_* must forward the format string and args unchanged."""
        logger.periodic_info(logger.EVERY_DAY, _MSG_INFO, *_DATA)
        mock_info.assert_called_once_with(_MSG_INFO, *_DATA)
        logger.periodic_error(logger.EVERY_DAY, _MSG_ERROR, *_DATA)
        mock_error.assert_called_once_with(_MSG_ERROR, *_DATA)
        logger.periodic_warn(logger.EVERY_DAY, _MSG_WARN, *_DATA)
        mock_warn.assert_called_once_with(_MSG_WARN, *_DATA)
        logger.periodic_verbose(logger.EVERY_DAY, _MSG_VERBOSE, *_DATA)
        mock_verbose.assert_called_once_with(_MSG_VERBOSE, *_DATA)

    def test_logger_should_log_in_utc(self):
        """File-appender timestamps must be UTC in ISO-8601 with 'Z' suffix."""
        file_name = "test.log"
        file_path = os.path.join(self.tmp_dir, file_name)
        test_logger = logger.Logger()
        test_logger.add_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path=file_path)
        before_write_utc = datetime.utcnow()
        test_logger.info("The time should be in UTC")
        with open(file_path, "r") as log_file:
            log = log_file.read()
            try:
                time_in_file = datetime.strptime(
                    log.split(logger.LogLevel.STRINGS[logger.LogLevel.INFO])[0].strip(),
                    u'%Y-%m-%dT%H:%M:%S.%fZ')
            except ValueError:
                self.fail("Ensure timestamp follows ISO-8601 format + 'Z' for UTC")
            # If the time difference is > 5secs, there's a high probability that the time_in_file is in different TZ
            self.assertTrue((time_in_file - before_write_utc) <= timedelta(seconds=5))

    @patch("azurelinuxagent.common.logger.datetime")
    def test_logger_should_log_micro_seconds(self, mock_dt):
        """Timestamps must always carry microseconds, even when they are 0."""
        # datetime.isoformat() skips ms if ms=0, this test ensures that ms is always set
        file_name = "test.log"
        file_path = os.path.join(self.tmp_dir, file_name)
        test_logger = logger.Logger()
        test_logger.add_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path=file_path)
        ts_with_no_ms = datetime.utcnow().replace(microsecond=0)
        mock_dt.utcnow = MagicMock(return_value=ts_with_no_ms)
        test_logger.info("The time should contain milli-seconds")
        with open(file_path, "r") as log_file:
            log = log_file.read()
            try:
                time_in_file = datetime.strptime(
                    log.split(logger.LogLevel.STRINGS[logger.LogLevel.INFO])[0].strip(),
                    u'%Y-%m-%dT%H:%M:%S.%fZ')
            except ValueError:
                self.fail("Ensure timestamp follows ISO-8601 format and has micro seconds in it")
            self.assertEqual(ts_with_no_ms, time_in_file, "Timestamps dont match")

    def test_telemetry_logger(self):
        """The telemetry appender forwards only messages at/above its level."""
        mock = MagicMock()
        appender = logger.TelemetryAppender(logger.LogLevel.WARNING, mock)
        appender.write(logger.LogLevel.WARNING, "--unit-test-WARNING--")
        mock.assert_called_with(logger.LogLevel.WARNING, "--unit-test-WARNING--")
        mock.reset_mock()
        appender.write(logger.LogLevel.ERROR, "--unit-test-ERROR--")
        mock.assert_called_with(logger.LogLevel.ERROR, "--unit-test-ERROR--")
        mock.reset_mock()
        appender.write(logger.LogLevel.INFO, "--unit-test-INFO--")
        mock.assert_not_called()
        mock.reset_mock()
        for _ in range(5):
            appender.write(logger.LogLevel.ERROR, "--unit-test-ERROR--")
            appender.write(logger.LogLevel.INFO, "--unit-test-INFO--")
        self.assertEqual(5, mock.call_count)  # Only ERROR should be called.

    @patch('azurelinuxagent.common.event.EventLogger.save_event')
    def test_telemetry_logger_not_on_by_default(self, mock_save):
        """With telemetry logging off (default), nothing is saved as an event."""
        appender = logger.TelemetryAppender(logger.LogLevel.WARNING, add_log_event)
        appender.write(logger.LogLevel.WARNING, 'Cgroup controller "memory" is not mounted. '
                                                'Failed to create a cgroup for extension '
                                                'Microsoft.OSTCExtensions.DummyExtension-1.2.3.4')
        self.assertEqual(0, mock_save.call_count)

    @patch("azurelinuxagent.common.logger.StdoutAppender.write")
    @patch("azurelinuxagent.common.logger.TelemetryAppender.write")
    @patch("azurelinuxagent.common.logger.ConsoleAppender.write")
    @patch("azurelinuxagent.common.logger.FileAppender.write")
    def test_add_appender(self, mock_file_write, mock_console_write, mock_telem_write, mock_stdout_write):
        """add_appender registers each appender type exactly once and all of
        them receive every message regardless of level (filtering happens
        inside each appender's own write)."""
        lg = logger.Logger(logger.DEFAULT_LOGGER, "TestLogger1")
        lg.add_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path=self.log_file)
        lg.add_appender(logger.AppenderType.TELEMETRY, logger.LogLevel.WARNING, path=add_log_event)
        lg.add_appender(logger.AppenderType.CONSOLE, logger.LogLevel.WARNING, path="/dev/null")
        lg.add_appender(logger.AppenderType.STDOUT, logger.LogLevel.WARNING, path=None)
        counter = 0
        for appender in lg.appenders:
            if isinstance(appender, logger.FileAppender):
                counter += 1
            elif isinstance(appender, logger.TelemetryAppender):
                counter += 1
            elif isinstance(appender, logger.ConsoleAppender):
                counter += 1
            elif isinstance(appender, logger.StdoutAppender):
                counter += 1
        # All 4 appenders should have been included.
        self.assertEqual(4, counter)
        # The write for all the loggers will get called, but the levels are honored in the individual write method
        # itself. Each appender has its own test to validate the writing of the log message for different levels.
        # For Reference: tests.common.test_logger.TestAppender
        lg.warn("Test Log")
        self.assertEqual(1, mock_file_write.call_count)
        self.assertEqual(1, mock_console_write.call_count)
        self.assertEqual(1, mock_telem_write.call_count)
        self.assertEqual(1, mock_stdout_write.call_count)
        lg.info("Test Log")
        self.assertEqual(2, mock_file_write.call_count)
        self.assertEqual(2, mock_console_write.call_count)
        self.assertEqual(2, mock_telem_write.call_count)
        self.assertEqual(2, mock_stdout_write.call_count)
        lg.error("Test Log")
        self.assertEqual(3, mock_file_write.call_count)
        self.assertEqual(3, mock_console_write.call_count)
        self.assertEqual(3, mock_telem_write.call_count)
        self.assertEqual(3, mock_stdout_write.call_count)

    @patch("azurelinuxagent.common.logger.StdoutAppender.write")
    @patch("azurelinuxagent.common.logger.TelemetryAppender.write")
    @patch("azurelinuxagent.common.logger.ConsoleAppender.write")
    @patch("azurelinuxagent.common.logger.FileAppender.write")
    def test_set_prefix(self, mock_file_write, mock_console_write, mock_telem_write, mock_stdout_write):
        """The configured prefix must appear in the message every appender gets."""
        lg = logger.Logger(logger.DEFAULT_LOGGER)
        prefix = "YoloLogger"
        lg.set_prefix(prefix)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(lg.prefix, prefix)
        lg.add_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path=self.log_file)
        lg.add_appender(logger.AppenderType.TELEMETRY, logger.LogLevel.WARNING, path=add_log_event)
        lg.add_appender(logger.AppenderType.CONSOLE, logger.LogLevel.WARNING, path="/dev/null")
        lg.add_appender(logger.AppenderType.STDOUT, logger.LogLevel.WARNING, path=None)
        lg.error("Test Log")
        self.assertIn(prefix, mock_file_write.call_args[0][1])
        self.assertIn(prefix, mock_console_write.call_args[0][1])
        self.assertIn(prefix, mock_telem_write.call_args[0][1])
        self.assertIn(prefix, mock_stdout_write.call_args[0][1])

    @patch("azurelinuxagent.common.logger.StdoutAppender.write")
    @patch("azurelinuxagent.common.logger.TelemetryAppender.write")
    @patch("azurelinuxagent.common.logger.ConsoleAppender.write")
    @patch("azurelinuxagent.common.logger.FileAppender.write")
    def test_nested_logger(self, mock_file_write, mock_console_write, mock_telem_write, mock_stdout_write):
        """
        The purpose of this test is to see if the logger gets correctly created when passed it another logger and also
        if the appender correctly gets the messages logged. This is how the ExtHandlerInstance logger works.
        I initialize the default logger(logger), then create a new logger(lg) from it, and then log using logger & lg.
        See if both logs are flowing through or not.
        """
        parent_prefix = "ParentLogger"
        child_prefix = "ChildLogger"
        logger.add_logger_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path=self.log_file)
        logger.add_logger_appender(logger.AppenderType.TELEMETRY, logger.LogLevel.WARNING, path=add_log_event)
        logger.add_logger_appender(logger.AppenderType.CONSOLE, logger.LogLevel.WARNING, path="/dev/null")
        logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.WARNING)
        logger.set_prefix(parent_prefix)
        lg = logger.Logger(logger.DEFAULT_LOGGER, child_prefix)
        lg.error("Test Log")
        self.assertEqual(1, mock_file_write.call_count)
        self.assertEqual(1, mock_console_write.call_count)
        self.assertEqual(1, mock_telem_write.call_count)
        self.assertEqual(1, mock_stdout_write.call_count)
        self.assertIn(child_prefix, mock_file_write.call_args[0][1])
        self.assertIn(child_prefix, mock_console_write.call_args[0][1])
        self.assertIn(child_prefix, mock_telem_write.call_args[0][1])
        self.assertIn(child_prefix, mock_stdout_write.call_args[0][1])
        logger.error("Test Log")
        self.assertEqual(2, mock_file_write.call_count)
        self.assertEqual(2, mock_console_write.call_count)
        self.assertEqual(2, mock_telem_write.call_count)
        self.assertEqual(2, mock_stdout_write.call_count)
        self.assertIn(parent_prefix, mock_file_write.call_args[0][1])
        self.assertIn(parent_prefix, mock_console_write.call_args[0][1])
        self.assertIn(parent_prefix, mock_telem_write.call_args[0][1])
        self.assertIn(parent_prefix, mock_stdout_write.call_args[0][1])

    @patch("azurelinuxagent.common.event.send_logs_to_telemetry", return_value=True)
    @patch("azurelinuxagent.common.conf.get_lib_dir")
    def test_telemetry_logger_add_log_event(self, mock_lib_dir, *_):
        """A warning routed through the telemetry appender lands in an event file."""
        mock_lib_dir.return_value = self.lib_dir
        __event_logger__.event_dir = self.event_dir
        prefix = "YoloLogger"
        logger.add_logger_appender(logger.AppenderType.TELEMETRY, logger.LogLevel.WARNING, path=add_log_event)
        logger.set_prefix(prefix)
        logger.warn('Test Log - Warning')
        event_files = os.listdir(__event_logger__.event_dir)
        self.assertEqual(1, len(event_files))
        log_file_event = os.path.join(__event_logger__.event_dir, event_files[0])
        try:
            with open(log_file_event) as logfile:
                logcontent = logfile.read()
                # Checking the contents of the event file.
                self.assertIn("Test Log - Warning", logcontent)
        except Exception as e:
            self.fail("The log file looks like it isn't correctly setup for this test. Take a look. "
                      "{0}".format(e))

    @skip_if_predicate_true(lambda: True, "Enable this test when SEND_LOGS_TO_TELEMETRY is enabled")
    @patch("azurelinuxagent.common.logger.StdoutAppender.write")
    @patch("azurelinuxagent.common.logger.ConsoleAppender.write")
    @patch("azurelinuxagent.common.event.send_logs_to_telemetry", return_value=True)
    def test_telemetry_logger_verify_maximum_recursion_depths_doesnt_happen(self, *_):
        """Logging past the event quota must not recurse logger->telemetry->logger."""
        logger.add_logger_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path="/dev/null")
        logger.add_logger_appender(logger.AppenderType.TELEMETRY, logger.LogLevel.WARNING, path=add_log_event)
        for i in range(MAX_NUMBER_OF_EVENTS):
            logger.warn('Test Log - {0} - 1 - Warning'.format(i))
        exception_caught = False
        # #1035 was caused due to too many files being written in an error condition. Adding even one more here broke
        # the camels back earlier - It would go into an infinite recursion as telemetry would call log, which in turn
        # would call telemetry, and so on.
        # The description of the fix is given in the comments @ azurelinuxagent.common.logger.Logger#log.write_log.
        try:
            for i in range(10):
                logger.warn('Test Log - {0} - 2 - Warning'.format(i))
        except RuntimeError:
            exception_caught = True
        self.assertFalse(exception_caught, msg="Caught a Runtime Error. This should not have been raised.")

    @skip_if_predicate_true(lambda: True, "Enable this test when SEND_LOGS_TO_TELEMETRY is enabled")
    @patch("azurelinuxagent.common.logger.StdoutAppender.write")
    @patch("azurelinuxagent.common.logger.ConsoleAppender.write")
    @patch("azurelinuxagent.common.event.send_logs_to_telemetry", return_value=True)
    @patch("azurelinuxagent.common.conf.get_lib_dir")
    def test_telemetry_logger_check_all_file_logs_written_when_events_gt_MAX_NUMBER_OF_EVENTS(self, mock_lib_dir, *_):
        """The file appender keeps every line even after telemetry starts
        dropping events past MAX_NUMBER_OF_EVENTS."""
        mock_lib_dir.return_value = self.lib_dir
        __event_logger__.event_dir = self.event_dir
        no_of_log_statements = MAX_NUMBER_OF_EVENTS + 100
        exception_caught = False
        prefix = "YoloLogger"
        logger.add_logger_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path=self.log_file)
        logger.add_logger_appender(logger.AppenderType.TELEMETRY, logger.LogLevel.WARNING, path=add_log_event)
        logger.set_prefix(prefix)
        # Calling logger.warn no_of_log_statements times would cause the telemetry appender to writing
        # 1000 events into the events dir, and then drop the remaining events. It should not generate the RuntimeError
        try:
            for i in range(0, no_of_log_statements):
                logger.warn('Test Log - {0} - 1 - Warning'.format(i))
        except RuntimeError:
            exception_caught = True
        self.assertFalse(exception_caught, msg="Caught a Runtime Error. This should not have been raised.")
        self.assertEqual(MAX_NUMBER_OF_EVENTS, len(os.listdir(__event_logger__.event_dir)))
        try:
            with open(self.log_file) as logfile:
                logcontent = logfile.readlines()
                # Checking the last log entry.
                # Subtracting 1 as range is exclusive of the upper bound
                self.assertIn("WARNING {1} Test Log - {0} - 1 - Warning".format(no_of_log_statements - 1, prefix),
                              logcontent[-1])
                # Checking the 1001st log entry. We know that 1001st entry would generate a PERIODIC message of too many
                # events, which should be captured in the log file as well.
                self.assertRegex(logcontent[1001], r"(.*WARNING\s*{0}\s*\[PERIODIC\]\s*Too many files under:.*{1}, "
                                                   r"current count\:\s*\d+,\s*removing oldest\s*.*)".format(prefix,
                                                                                                            self.event_dir))
        except Exception as e:
            self.fail("The log file looks like it isn't correctly setup for this test. "
                      "Take a look. {0}".format(e))
class TestAppender(AgentTestCase):
    """Per-appender tests: each appender type must honor its own log level."""

    def setUp(self):
        AgentTestCase.setUp(self)
        self.lib_dir = tempfile.mkdtemp()
        self.event_dir = os.path.join(self.lib_dir, EVENTS_DIRECTORY)
        fileutil.mkdir(self.event_dir)
        self.log_file = tempfile.mkstemp(prefix="logfile-")[1]
        logger.reset_periodic()

    def tearDown(self):
        AgentTestCase.tearDown(self)
        logger.reset_periodic()
        fileutil.rm_dirs(self.event_dir)
        # in-place clear of the shared appender list
        del logger.DEFAULT_LOGGER.appenders[:]

    def _log_lines(self):
        # Read back whatever the appender under test wrote to the log file.
        with open(self.log_file) as handle:
            return handle.readlines()

    @staticmethod
    def _emit_all_levels():
        # One message per level, lowest to highest severity.
        logger.verbose("test-verbose")
        logger.info("test-info")
        logger.warn("test-warn")
        logger.error("test-error")

    @patch("azurelinuxagent.common.event.send_logs_to_telemetry", return_value=True)
    @patch("azurelinuxagent.common.logger.sys.stdout.write")
    @patch("azurelinuxagent.common.event.EventLogger.add_log_event")
    def test_no_appenders_added(self, mock_add_log_event, mock_sys_stdout, *_):
        # With no appender registered, no sink may receive anything.
        self._emit_all_levels()
        # File sink stays empty...
        self.assertEqual(0, len(self._log_lines()))
        # ...telemetry is never invoked...
        self.assertEqual(0, mock_add_log_event.call_count)
        # ...and stdout is never written.
        self.assertEqual(0, mock_sys_stdout.call_count)

    def test_console_appender(self):
        logger.add_logger_appender(logger.AppenderType.CONSOLE, logger.LogLevel.WARNING, path=self.log_file)
        logger.verbose("test-verbose")
        # Levels are honored: VERBOSE is below the WARNING threshold.
        self.assertEqual(0, len(self._log_lines()))
        logger.info("test-info")
        # Levels are honored: INFO is below the WARNING threshold.
        self.assertEqual(0, len(self._log_lines()))
        # The console appender opens its path in 'w' mode, so at most one
        # line ever survives in the file.
        logger.warn("test-warn")
        written = self._log_lines()
        self.assertEqual(1, len(written))
        self.assertRegex(written[0], r"(.*WARNING\s\w+\s*test-warn.*)")
        logger.error("test-error")
        written = self._log_lines()
        self.assertEqual(1, len(written))
        self.assertRegex(written[0], r"(.*ERROR\s\w+\s*test-error.*)")

    def test_file_appender(self):
        logger.add_logger_appender(logger.AppenderType.FILE, logger.LogLevel.INFO, path=self.log_file)
        self._emit_all_levels()
        written = self._log_lines()
        # VERBOSE is filtered out by the INFO threshold; the rest append.
        self.assertEqual(3, len(written))
        expected = (r"(.*INFO\s\w+\s*test-info.*)",
                    r"(.*WARNING\s\w+\s*test-warn.*)",
                    r"(.*ERROR\s\w+\s*test-error.*)")
        for line, pattern in zip(written, expected):
            self.assertRegex(line, pattern)

    @patch("azurelinuxagent.common.event.send_logs_to_telemetry", return_value=True)
    @patch("azurelinuxagent.common.event.EventLogger.add_log_event")
    def test_telemetry_appender(self, mock_add_log_event, *_):
        logger.add_logger_appender(logger.AppenderType.TELEMETRY, logger.LogLevel.WARNING, path=add_log_event)
        self._emit_all_levels()
        # Only WARNING and ERROR clear the threshold.
        self.assertEqual(2, mock_add_log_event.call_count)

    @patch("azurelinuxagent.common.logger.sys.stdout.write")
    def test_stdout_appender(self, mock_sys_stdout):
        logger.add_logger_appender(logger.AppenderType.STDOUT, logger.LogLevel.ERROR)
        self._emit_all_levels()
        # Only test-error clears the ERROR threshold.
        self.assertEqual(1, mock_sys_stdout.call_count)
| |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import super
from future.builtins import next
from future.builtins import chr
from future.builtins import dict
from future.builtins import int
from future.builtins import str
from future import standard_library
standard_library.install_hooks()
# !/usr/bin/env python
#
# This script has the purpose of analyzing an orthoXML file.
# It does so by providing several methods operating on the file.
#
# Adrian Altenhoff, June 2013
#
import lxml.etree as etree
import collections
import itertools
import io
import re
import sys
from .tools import enum, PROGRESSBAR, setup_progressbar
from .orthoxmlquery import ElementError, OrthoXMLQuery
from .taxonomy import NewickTaxonomy, TaxRangeOrthoXMLTaxonomy, XMLTaxonomy
# Largest value of a Py_ssize_t on this platform, kept under the historic
# name MAXINT (Python 3 has no sys.maxint).
MAXINT = sys.maxsize
class OrthoXMLParser(object):
ns = {"ns0": "http://orthoXML.org/2011/"} # xml namespace
def __init__(self, filename):
"""creates a OrthoXMLParser object. the parameter filename
needs to be a path pointing to the orthoxml file to be
analyzed."""
self.doc = etree.parse(filename)
self.root = self.doc.getroot()
self.tax = None
self.singletons = None
self._buildMappings() # builds three dictionaries - see def below
def write(self, filename, **kwargs):
"""Write out the (modified) orthoxml file into a new file.
kwargs include:
pretty_print=[True/False],
xml_declaration=[True/False]
encoding=[e.g. 'UTF-8']"""
if 'pretty_print' in kwargs:
self._remove_whitespace()
self.doc.write(filename, **kwargs)
def _remove_whitespace(self):
"""Remove whitespace from text and tail fields so that lxml can
pretty_print the tree"""
for element in self.root.iter():
if element.text is not None and element.text.isspace():
element.text = None
element.tail = None
def mapGeneToXRef(self, id_, typ='protId'):
"""
Looks up id_ (integer id_ number as a string) and type in self._xrefs
dictionary (aka self._xrefs)
Returns lookup value, which is a gene name
"""
if typ is None:
res = id_
else:
tup = (id_, typ)
res = self._xrefs.get(tup, None)
if res is None:
# fallback, if not in dict
gene = OrthoXMLQuery.getGeneFromId(id_, self.root)
# return desired ID typ, otherwise whole element
if typ is not None:
res = gene.get(typ, gene)
else:
res = gene
return res
def getSpeciesSet(self):
return self._species # all species in the xml tree
def getGeneIds(self, speciesFilter=None, tag="name"):
genes = list(self._gene2species.keys())
if speciesFilter is not None:
genes = [g for g in genes
if self._gene2species[g].get(tag, None) in speciesFilter]
return genes
def getToplevelGroups(self):
"""A function yielding the toplevel orthologGroups from the file.
This corresponds to gene families for the purpose of this project."""
return OrthoXMLQuery.getToplevelOrthologGroups(self.root)
def getSubFamilies(self, level, root=None):
"""return a forest of orthologGroup nodes with roots at the
given taxonomic level. This function requires that the
orthologGroup nodes are annotated with a 'property' element
with 'TaxRange' as a name and the actual level in a 'value'
attribute. This is not required by the orthoxml schema."""
if root is None:
root = self.root
return OrthoXMLQuery.getGroupsAtLevel(level, root)
def get_species_below_node(self, node, return_gene_total_count=False):
""" return a set of all species that have a geneRef present beneath
the specified node
:param node: Node of interest
:param return_gene_total_count: boolean flag whether or not to return
the number of genes below this node.
:return:
"""
generef_nodes = OrthoXMLQuery.getGeneRefNodes(node)
species_covered = {self.mapGeneToSpecies(gr.get('id'))
for gr in generef_nodes}
if return_gene_total_count:
return species_covered, len(generef_nodes)
else:
return species_covered
@classmethod
def is_ortholog_group(cls, element):
"""
Returns true if the passed element is an orthologGroup xml node
"""
return element.tag == '{{{ns0}}}orthologGroup'.format(**cls.ns)
@classmethod
def is_paralog_group(cls, element):
"""
Returns true if the passed element is an paralogGroup xml node
"""
return element.tag == '{{{ns0}}}paralogGroup'.format(**cls.ns)
@classmethod
def is_evolutionary_node(cls, element):
"""Returns true if the passed element is an evolutionary event
xml node, i.e. if it is either an orthologGroup or a
paralogGroup element."""
return (cls.is_ortholog_group(element) or
cls.is_paralog_group(element))
def mapGeneToSpecies(self, id_, typ='name'):
"""
Does a lookup in the self._gene2species dict:
key = idnum, return = species name
"""
if self._gene2species is None:
self._buildMappings()
return self._gene2species[id_].get(typ)
def _findSubNodes(self, targetNode, root=None):
"""return all (recursively) found elements with tagname
'targetNode' below the root element. If no root element
is provided, search starts at the document root."""
rootNode = root if root is not None else self.root
return rootNode.findall(".//{{{0}}}{1}".
format(self.ns['ns0'], targetNode))
def _buildMappings(self):
"""Builds two dictionaries:
self._gene2species - keys are ID numbers, values are species
self._xrefs - keys are tuples
(idnum, idtype ['geneId','protId']),
values are gene names
Also builds the set:
self._species - All species names in the xml tree"""
mapping = dict()
xref = dict()
for species in self._findSubNodes("species"):
genes = self._findSubNodes("gene", root=species)
for gene in genes:
id_ = gene.get('id')
mapping[id_] = species
for tag in gene.keys():
if tag != "id":
xref[(id_, tag)] = gene.get(tag)
self._gene2species = mapping
self._xrefs = xref
self._species = frozenset({z.get('name') for z in mapping.values()})
self._levels = frozenset({n.get('value')
for n in self._findSubNodes("property")
if n.get('name') == "TaxRange"})
def getUbiquitusFamilies(self, minCoverage=.5):
families = self.getToplevelGroups()
return [x for x in families if len(self.getGenesPerSpeciesInFam(x)) >=
minCoverage * len(self.getSpeciesSet())]
def getLevels(self):
return self._levels
def getGenesPerSpeciesInFam(self, fam):
"""
Takes a gene family, returns a dictionary:
keys = species names
values = set of geneIds belonging to that species at a level
descended from the family
"""
genes = collections.defaultdict(set)
geneRefs = self._findSubNodes("geneRef", fam)
for gref in geneRefs:
gid = gref.get('id')
sp = self.mapGeneToSpecies(gid)
genes[sp].add(gid)
return genes
    def getFamHistory(self):
        """This method returns a FamHistory object initialized with
        the most powerful LevelAnalysis handler. The handler depends
        on whether the parser contains a tax attribute, set by the
        augmentTaxonomyInfo method if called."""
        # assure that orthologGroup xml elements annotated with an 'og' attr
        if self.root.find(".//*[@og]") is None:
            # no LOFT ids present yet -- annotate the whole document once
            GroupAnnotator(self).annotateDoc()
        analyzer = LevelAnalysisFactory().newLevelAnalysis(self)
        return FamHistory(self, analyzer)
    def augmentTaxonomyInfo(self, tax, propagate_top=False):
        """Assign a taxonomy to the orthoxml file. This taxonomy
        is used to augment the xml with the relevant level infos
        as 'TaxRange' property tags in orthologGroup elements.
        The propagate_top parameter should be used to enable
        or disable the propagation of all the levels which are
        older than the family's topmost level. In other words, if
        enabled, all families arose at LUCA, otherwise families
        can be invented later on in evolution."""
        if self.tax is not None:
            # re-annotating with a second taxonomy would produce conflicting levels
            raise Exception("a taxonomy can be assigned only once")
        self.tax = tax
        GroupAnnotator(self).annotateMissingTaxRanges(tax, propagate_top)
    def augmentSingletons(self):
        """Add input genes that belong to no ortholog group to the xml
        as extra single-gene ortholog groups (see GroupAnnotator)."""
        GroupAnnotator(self).annotateSingletons()
class TaxonomyFactory(object):
@classmethod
def newTaxonomy(cls, arg):
if isinstance(arg, str):
if arg.endswith('.xml'):
return XMLTaxonomy(arg)
else:
suffix = arg[arg.rindex('.'):]
if suffix in ['.nwk', '.tree', '.newick']:
return NewickTaxonomy(arg)
elif isinstance(arg, OrthoXMLParser):
return TaxRangeOrthoXMLTaxonomy(arg)
else:
raise NotImplementedError("unknown type of Taxonomy")
class GeneFamily(object):
    """GeneFamily(root_element)
    Represents one gene family rooted at an orthologous group. """
    def __init__(self, root_element):
        # only orthologGroup elements may root a GeneFamily
        if not OrthoXMLParser.is_ortholog_group(root_element):
            raise ElementError('Not an orthologGroup node')
        self.root = root_element
    def __repr__(self):
        return '{} (id#={})'.format(self.__class__.__name__, self.getFamId())
    # Rich comparisons delegate to _cmp(), which turns the LOFT family id
    # into a sortable tuple. NOTE(review): __eq__ is defined without
    # __hash__, so instances are unhashable in Python 3 -- fine for the
    # list/sort usage in this module.
    def __lt__(self, other):
        return self._cmp() < other._cmp()
    def __gt__(self, other):
        return self._cmp() > other._cmp()
    def __le__(self, other):
        return self._cmp() <= other._cmp()
    def __ge__(self, other):
        return self._cmp() >= other._cmp()
    def __eq__(self, other):
        return self._cmp() == other._cmp()
    def __str__(self):
        return self.root.get('og')
    def prefix_match(self, other):
        """Return True if this family's LOFT id is a prefix of `other`'s,
        i.e. `other` is this family itself or one of its subfamilies."""
        query = self._cmp()
        target = other._cmp()
        return target[:len(query)] == query
    def getMemberGenes(self):
        """
        Get all genes belonging to this family. Cached to speed up repeated calls.
        """
        if hasattr(self, '_member_genes'):
            return self._member_genes
        else:
            members = self.root.findall('.//{{{ns0}}}geneRef'.
                                        format(**OrthoXMLParser.ns))
            self._member_genes = [x.get('id') for x in members]
            return self._member_genes
    def getFamId(self):
        # 'og' is the LOFT id attribute set by GroupAnnotator.annotateDoc
        return self.root.get('og')
    def _cmp(self):
        """
        Enable sorting based on LOFT numbering scheme,
        such that (e.g.)
        1.1a < 1.1b,
        1.1b < 1.1c,
        1.1z < 1.1aa,
        2 < 10,
        10 < n/a,
        """
        # numeric runs sort before alphabetic runs of any length; longer
        # alphabetic runs sort after shorter ones (so 'z' < 'aa')
        fam = self.getFamId()
        comp = tuple((0, int(num),) if num else (len(alpha.strip('.')),
                     alpha.strip('.'),) for (num, alpha) in re.findall(r'(\d+)|(\D+)',
                     fam))
        return comp
    def getLevels(self):
        """Return the TaxRange levels annotated on this family's root node."""
        return OrthoXMLQuery.getLevels(self.root)
    def analyzeLevel(self, level):
        """analyze the structure of the family at a given taxonomic
        level.
        returns a list of GeneFamily object, one per sub-family"""
        subFamNodes = OrthoXMLQuery.getGroupsAtLevel(level, self.root)
        subFams = [GeneFamily(fam) for fam in subFamNodes]
        return subFams
    def analyze(self, strategy, level):
        """analyze the history of the GeneFamily using the strategy
        passed to the method. The strategy argument must be an
        object providing a analyzeGeneFam method,
        e.g. a LevelAnalysis object."""
        self.summary = strategy.analyzeGeneFam(self, level)
    def write(self, fd, speciesFilter=None, idFormatter=lambda x: x):
        """Write one tab-separated line per species of the analysed summary:
        famId, species, gene count, "<type>:<formatted gene ids>".
        `speciesFilter` (a collection of names) restricts the output;
        `idFormatter` maps internal gene ids to display ids."""
        species = list(self.summary.keys())
        if speciesFilter is not None:
            species = [g for g in species if g in speciesFilter]
        for spec in self.summary.keys():
            if not spec in species:
                continue
            sumElem = self.summary[spec]
            refs = "; ".join([idFormatter(gid) for gid in sumElem.genes])
            fd.write("{}\t{}\t{}\t{}:{}\n".format(
                self.getFamId(), spec, len(sumElem.genes),
                sumElem.typ, refs))
    def is_singleton(self):
        # True when analyze() has run and at least one species entry was
        # typed SINGLETON; False before any analysis.
        if not hasattr(self, 'summary'):
            return False
        return 'SINGLETON' in {x.typ for x in self.summary.values()}
class Singletons(GeneFamily):
    """Pseudo-family collecting genes that belong to no ortholog group.

    Unlike GeneFamily it is not backed by an xml node; it simply wraps
    a set of gene ids."""
    def __init__(self, element):
        # accept either a single gene id (str) or an iterable of ids
        memb = set()
        if isinstance(element, str):
            memb.add(element)
        else:
            memb.update(element)
        self.members = memb
    def getMemberGenes(self):
        return self.members
    def getFamId(self):
        # singletons share the placeholder id, sorted last by _cmp()
        return "n/a"
    def getLevels(self):
        return None
    def analyzeLevel(self, level):
        return self
    def analyze(self, strategy, level=None):
        """Run `strategy` on this pseudo-family, then force every species
        summary to the SINGLETON type.

        Bug fix: GeneFamily.analyze requires a `level` argument; the
        previous `super().analyze(strategy)` call raised TypeError."""
        super().analyze(strategy, level)
        for sumElement in self.summary.values():
            sumElement.typ = "SINGLETON"
class SummaryOfSpecies(object):
    """Plain record pairing a classification label (`typ`, e.g.
    'MULTICOPY') with the collection of gene ids it applies to."""
    def __init__(self, typ, genes):
        self.genes = genes
        self.typ = typ
class LevelAnalysisFactory(object):
    """Chooses the most capable LevelAnalysis implementation available
    for a given parser."""
    def newLevelAnalysis(self, parser):
        """Return the appropriate LevelAnalysis instance depending on
        whether the parser carries a taxonomy and a singleton set."""
        if parser.tax is None:
            return BasicLevelAnalysis(parser)
        if parser.singletons is None:
            return TaxAwareLevelAnalysis(parser, parser.tax)
        return SingletonAwareLevelAnalysis(parser, parser.tax,
                                           parser.singletons)
class BasicLevelAnalysis(object):
    """Classifies the genes of a family per species, without any
    taxonomy knowledge."""
    GeneClasses = enum("MULTICOPY",
                       "SINGLECOPY",
                       "ANCIENT_BUT_LOST",
                       "LATER_GAINED",
                       "SINGLETON")
    def __init__(self, parser):
        self.parser = parser
    def analyzeGeneFam(self, fam, level=None):
        """Classify every gene of `fam` by species into MULTICOPY
        (more than one copy in the genome) or SINGLECOPY.

        Returns a dict: species name -> SummaryOfSpecies."""
        per_species = collections.defaultdict(set)
        for gene_id in fam.getMemberGenes():
            per_species[self.parser.mapGeneToSpecies(gene_id)].add(gene_id)
        summary = dict()
        for spec, genes in per_species.items():
            gclass = (self.GeneClasses.MULTICOPY if len(genes) > 1
                      else self.GeneClasses.SINGLECOPY)
            summary[spec] = SummaryOfSpecies(self.GeneClasses.reverse[gclass],
                                             genes)
        return summary
class TaxAwareLevelAnalysis(BasicLevelAnalysis):
    """Level analysis that uses a taxonomy to additionally report
    species that lost the gene."""
    def __init__(self, parser, tax):
        super().__init__(parser)
        # taxonomy providing descendants/level relations for loss inference
        self.tax = tax
    def addLosses(self, fam, summary, level):
        """Add an ANCIENT_BUT_LOST entry (with an empty gene list) to
        `summary` for every species covered by the family's taxonomic
        range that has no surviving gene copy."""
        lev = fam.getLevels()
        if lev is not None:
            # if several levels exist at this node, use oldest one
            # that is not older than `level'
            lev = self.tax.younger_than_filter(lev, level)
            mostGeneralLevel = self.tax.mostGeneralLevel(lev)
            speciesCoveredByLevel = self.tax.descendents[mostGeneralLevel]
            lostSpecies = speciesCoveredByLevel.difference(summary.keys())
            for lost in lostSpecies:
                summary[lost] = SummaryOfSpecies("ANCIENT_BUT_LOST", [])
    def analyzeGeneFam(self, fam, level):
        """analyzes a single gene family in the context of a known
        taxonomic tree.
        in addition to the method defined in the base class, this
        method adds information of lost genes. It does this by
        checking whether a species within the taxonomic range of
        the family contains a copy of the gene. if not, it had
        been lost."""
        summary = super().analyzeGeneFam(fam, level)
        self.addLosses(fam, summary, level)
        return summary
class SingletonAwareLevelAnalysis(TaxAwareLevelAnalysis):
    """Level analysis that, on top of loss detection, recognises
    singleton families (single unassigned genes)."""
    def __init__(self, parser, tax, singletons):
        super().__init__(parser, tax)
        # set of family ids that consist of one unassigned gene
        self.singletons = singletons
    def analyzeGeneFam(self, fam, level):
        """Classify every gene of `fam` by species into MULTICOPY,
        SINGLECOPY or SINGLETON, then add loss information.

        Returns a dict: species name -> SummaryOfSpecies."""
        spec2genes = collections.defaultdict(set)
        for geneId in fam.getMemberGenes():
            spec = self.parser.mapGeneToSpecies(geneId)
            spec2genes[spec].add(geneId)
        summary = dict()
        famId = fam.getFamId()
        for spec, set_ in spec2genes.items():
            if len(set_) > 1:
                gclass = self.GeneClasses.MULTICOPY
            elif famId in self.singletons:
                # fix: use the singleton set handed to the constructor instead
                # of reaching back through self.parser.singletons -- the stored
                # attribute was previously never used and the two could diverge
                gclass = self.GeneClasses.SINGLETON
            else:
                gclass = self.GeneClasses.SINGLECOPY
            summary[spec] = SummaryOfSpecies(self.GeneClasses.reverse[gclass],
                                             set_)
        self.addLosses(fam, summary, level)
        return summary
class FamHistory(object):
    """Snapshot of all gene families analysed at one taxonomic level.

    Populated via analyzeLevel(); two FamHistory objects from
    different levels can be compared with compare()."""
    # attribute of 'gene' elements used when formatting gene ids for
    # reports ('geneId' or 'protId'); None -> internal numeric ids
    XRefTag = None
    def __init__(self, parser, analyzer):
        self.parser = parser
        self.analyzer = analyzer
        self._geneFamList = list()
    def __len__(self):
        # counts singleton families as well
        return self.get_number_of_fams(singletons=True)
    def __getitem__(self, key):
        # NOTE(review): geneFamDict exists only after analyzeLevel() ran
        return self.geneFamDict[key]
    def __iter__(self):
        return iter(self.geneFamList)
    @property
    def geneFamList(self):
        # kept sorted by the setter (LOFT ordering via GeneFamily comparisons)
        return self._geneFamList
    @geneFamList.setter
    def geneFamList(self, gfam_list):
        self._geneFamList = sorted(gfam_list)
    def setXRefTag(self, tag):
        """set the attribute name of the 'gene' elements which should
        be used for report. defined by orthoxml are 'geneId' and
        'protId'. If not defined, the (numerical) internal ids are used."""
        self.XRefTag = tag
    def analyzeLevel(self, level):
        """Analyse every ortholog group defined at taxonomic `level` and
        store the resulting GeneFamily objects on this history."""
        subFamNodes = OrthoXMLQuery.getGroupsAtLevel(level, self.parser.root)
        gfamList = [GeneFamily(fam) for fam in subFamNodes]
        for gfam in gfamList:
            gfam.analyze(self.analyzer, level)
        self.geneFamDict = {gf.getFamId(): gf for gf in gfamList}
        self.geneFamList = gfamList
        self.analyzedLevel = level
    def write(self, fd, speciesFilter=None):
        """writes the FamHistory object to a given stream object
        in a human readable format.
        The stream object needs to have a write(str) method defined.
        The optional speciesFilter argument accepts a set of
        species names for which the genes in the families are
        reported."""
        formatter = lambda gid: self.parser.mapGeneToXRef(gid, self.XRefTag)
        fd.write("FamilyAnalysis at {}\n".format(self.analyzedLevel))
        for fam in self:
            fam.write(fd, speciesFilter, idFormatter=formatter)
    def __str__(self):
        fd = io.StringIO()
        self.write(fd)
        res = fd.getvalue()
        fd.close()
        return res
    def compare(self, other):
        """ compares two FamilyHistory objects in linear time
        algorithm implemented in Comparer class
        """
        c = Comparer(self, other)
        c.run()
        return c.comp
    def get_number_of_fams(self, singletons=False):
        """Return the number of families; singleton families are
        excluded unless `singletons` is True."""
        if singletons:
            return len(self.geneFamList)
        return len([x for x in self if not x.is_singleton()])
class Comparer(object):
    """
    Compares two FamilyHistory objects in linear time.
    Algorithm:
    Init:
        preprocess input by sorting FamilyHistory geneFamLists
        maintain pointers to the lists that only move forwards
    Run:
        If families exactly match:
            Mark as FamIdent
            advance both lists one element
        Else if family2 is a prefix match of family 1:
            While family1.prefix_match(family2):
                Mark as FamDupl
                advance list 2
            advance list 1
        Else if family 1 < family 2 (according to sort comparison)
            While family 1 < family 2:
                Mark as FamLost
                advance list 1
        Else if family 1 > family 2
            While family 1 > family 2:
                Mark as FamNovel
                advance list 2
        When list 1 is exhausted:
            mark all remaining members of list 2 as FamNovel
        When list 2 is exhausted:
            mark all remaining members of list 1 as FamLost
    End:
        When both lists are exhausted
    """
    def __init__(self, fam_history_1, fam_history_2):
        # iterators over the (already sorted) family lists; f1/f2 always
        # hold the current head element, or None once a list is exhausted
        self.i1 = iter(fam_history_1.geneFamList)
        self.i2 = iter(fam_history_2.geneFamList)
        self.f1 = None
        self.f2 = None
        self.advance_i1()
        self.advance_i2()
        self.comp = LevelComparisonResult(fam_history_1.analyzedLevel,
                                          fam_history_2.analyzedLevel)
    def run(self):
        """Drive the merge-style comparison until one list runs out,
        then flush the remainder of the other list."""
        while self.f1 is not None and self.f2 is not None:
            if self.f1 == self.f2:
                self.ident()
            elif self.f1.prefix_match(self.f2):
                self.dupl()
            elif self.f1 < self.f2:
                self.lost()
            elif self.f1 > self.f2:
                self.novel()
            else:
                raise Exception('Unexpected state')
        if self.f1 is None:
            self.l1_exhausted()
        if self.f2 is None:
            self.l2_exhausted()
    def ident(self):
        """Current families match exactly -> FamIdent; advance both lists."""
        self.comp.addFamily(FamIdent(self.f1.getFamId()))
        self.advance_i1()
        self.advance_i2()
    def dupl(self):
        """f1 duplicated into subfamilies: collect all prefix-matching
        families from list 2 into one FamDupl event, then advance list 1."""
        m = list()
        while self.f1.prefix_match(self.f2):
            m.append(self.f2)
            self.advance_i2()
            if self.f2 is None:
                break
        self.comp.addFamily(FamDupl(self.f1, m))
        self.advance_i1()
    def lost(self):
        """Families present in list 1 but missing from list 2 -> FamLost."""
        while self.f1 < self.f2 and not self.f1.prefix_match(self.f2):
            self.comp.addFamily(FamLost(self.f1.getFamId()))
            self.advance_i1()
            if self.f1 is None:
                break
    def novel(self):
        """Families present in list 2 but missing from list 1 -> FamNovel
        (or FamSingleton for singleton families)."""
        while self.f1 > self.f2 and not self.f1.prefix_match(self.f2):
            _Event = (FamSingleton if self.f2.is_singleton()
                      else FamNovel)  # this check is probably redundant
                                      # because there shouldn't be any
                                      # singletons if l1 is not exhausted
                                      # (singletons are always annotated last)
            self.comp.addFamily(_Event(self.f2.getFamId()))
            self.advance_i2()
            if self.f2 is None:
                break
    def advance_i1(self):
        """Move list 1 forward; f1 becomes None when exhausted."""
        try:
            val = next(self.i1)
        except StopIteration:
            val = None
        self.f1 = val
    def advance_i2(self):
        """Move list 2 forward; f2 becomes None when exhausted."""
        try:
            val = next(self.i2)
        except StopIteration:
            val = None
        self.f2 = val
    def l1_exhausted(self):
        """List 1 done: everything left in list 2 is novel (or singleton)."""
        while self.f2 is not None:
            _Event = (FamSingleton if self.f2.is_singleton()
                      else FamNovel)
            self.comp.addFamily(_Event(self.f2.getFamId()))
            self.advance_i2()
    def l2_exhausted(self):
        """List 2 done: everything left in list 1 was lost."""
        while self.f1 is not None:
            self.comp.addFamily(FamLost(self.f1.getFamId()))
            self.advance_i1()
class FamEvent(object):
    """Base class for per-family comparison events.

    Subclasses override the class attribute `event` with a descriptive
    string; instances remember the family and its display name."""
    event = None
    def __init__(self, fam):
        self.fam = fam
        self.name = str(fam)
    def __str__(self):
        return "{}: {}\n".format(self.name, self.event)
    def __repr__(self):
        return "{}: {}".format(self.name, self.event)
    def __eq__(self, other):
        return (self.name, self.event) == (other.name, other.event)
class FamIdent(FamEvent):
    """Family present and unchanged between the two compared levels."""
    event = "identical"
class FamNovel(FamEvent):
    """Family present at the second level but absent from the first."""
    event = "novel"
class FamLost(FamEvent):
    """Family present at the first level but absent from the second."""
    event = "lost"
class FamSingleton(FamEvent):
    """A single-member 'family' consisting of a gene that doesn't
    match any other family. Only occurs in a leaf."""
    event = "singleton"
class FamDupl(FamEvent):
    """A family that duplicated into one or more sub-families at the
    lower of the two compared levels."""
    event = "duplicated"
    def __init__(self, fam, subfam):
        super().__init__(fam)
        self.subfams = subfam
        # `into` is the "; "-joined display form of the subfamilies
        if isinstance(subfam, list):
            self.into = "; ".join(str(s) for s in subfam)
        else:
            self.into = str(subfam)
    def __str__(self):
        return self.write()
    def __eq__(self, other):
        return super().__eq__(other) and self.into == other.into
    def write(self):
        ''' Construct output for printing. Shows the ids of the duplicated
        GeneFamily and the resulting subfamilies. It also lists the
        members of the original GeneFamily and the new subfamilies.
        Sample output:
        -------
        114 --> 114.2a; 114.2b
        114: 47953; 28082; 11418; 29862; 117; 50097; 13634; 50845; 14300
        114.2a: 50097; 13634
        114.2b: 50845; 14300
        -------
        '''
        fam_id = self.fam.getFamId()
        lines = ["-------"]
        lines.append("{} --> {}".format(fam_id, self.into))
        lines.append("{}: {}".format(fam_id,
                                     "; ".join(self.fam.getMemberGenes())))
        for sub in self.subfams:
            lines.append("{}: {}".format(sub.getFamId(),
                                         "; ".join(sub.getMemberGenes())))
        lines.append("-------")
        return "\n".join(lines) + "\n"
class LevelComparisonResult(object):
    """
    Result of comparing two 'FamHistory' objects at different levels
    on the tree.
    """
    # event name <-> group index mappings used for grouped output
    groups = {'identical': 0,
              'duplicated': 1,
              'lost': 2,
              'novel': 3,
              'singleton': 4}
    groups_back = {0: 'identical',
                   1: 'duplicated',
                   2: 'lost',
                   3: 'novel',
                   4: 'singleton'}
    @staticmethod
    def sort_key(item):
        """LOFT-style sort key for an event's family name; the
        placeholder 'n/a' sorts after everything else."""
        if item.name == 'n/a':
            return (MAXINT,)
        pieces = re.findall(r'(\d+)|(\D+)', item.name)
        return tuple(int(num) if num else alpha for (num, alpha) in pieces)
    def group_sort_key(self, item):
        """Sort key that orders first by event group, then by name."""
        return (self.group_key(item),) + self.sort_key(item)
    def group_key(self, item):
        return self.groups[item.event]
    def __init__(self, lev1, lev2):
        self.lev1 = lev1
        self.lev2 = lev2
        self.fams_dict = dict()
    def __str__(self):
        buf = io.StringIO()
        self.write(buf)
        text = buf.getvalue()
        buf.close()
        return text
    def __getitem__(self, key):
        return self.fams_dict[key]
    def __iter__(self):
        return iter(self.fams)
    @property
    def fams(self):
        # events sorted by family name (LOFT order)
        return sorted(self.fams_dict.values(), key=self.sort_key)
    def addFamily(self, famEvent):
        self.fams_dict[famEvent.name] = famEvent
    def write(self, fd):
        """Write a human-readable listing of all events to stream `fd`."""
        header = "\nLevelComparisonResult between taxlevel {} and {}\n"
        fd.write(header.format(self.lev1, self.lev2))
        for fam in self:
            fd.writelines(str(fam))
    def summarise(self):
        """Count events per category; a 'duplicated' event contributes
        one count per resulting subfamily. Caches and returns the dict."""
        counts = {'identical': 0,
                  'novel': 0,
                  'lost': 0,
                  'duplicated': 0,
                  'singleton': 0}
        for event in self.fams_dict.values():
            if event.event == 'duplicated':
                counts['duplicated'] += len(event.into.split('; '))
            elif event.event in counts:
                counts[event.event] += 1
        self.summary = counts
        return counts
    def filter(self, filters):
        """Return the events whose category is in `filters` (a single
        category name or a set of them)."""
        if not isinstance(filters, set):
            filters = {filters}
        recognised = {'identical', 'lost', 'singleton', 'novel',
                      'duplicated'}
        if not filters.issubset(recognised):
            raise Exception('Unexpected filters: {0}'.format(filters))
        return [event for event in self if event.event in filters]
    def group_fams(self):
        """Return a dict: category name -> list of its events, each list
        sorted by family name."""
        ordered = sorted(self.fams_dict.values(), key=self.group_sort_key)
        return {self.groups_back[num]: list(members)
                for (num, members)
                in itertools.groupby(ordered, self.group_key)}
class GroupAnnotator(object):
    """this class annotates orthologGroup elements with the numbering
    schema presented in the LOFT paper:
    van der Heijden, Snel, van Noort, Huynen
    Orthology prediction at scalable resolution by phylogenetic tree analysis.
    BMC Bioinformatics, 2007, 8, 83
    and adding additional property tags with skipped TaxRange levels."""
    def __init__(self, parser, taxrange_2_taxid=None):
        # optional mapping TaxRange name -> numeric taxid (values coerced to str)
        self.parser = parser
        self.ns = parser.ns
        self.taxrange_2_taxid = {rng: str(val) for rng, val in taxrange_2_taxid.items()} if taxrange_2_taxid is not None else {}
    def _getNextSubId(self, idx):
        """helper method to return the next number at a given depth of
        duplication (idx)"""
        # self.dupCnt is (re)initialised per family in annotateDoc
        while len(self.dupCnt) < idx:
            self.dupCnt.append(0)
        self.dupCnt[idx - 1] += 1
        return self.dupCnt[idx - 1]
    def _encodeParalogClusterId(self, prefix, nr):
        """encode the paralogGroups at the same level, e.g. 1a, 1b, 1c
        for 3 paralogGroups next to each other. the nr argument
        identifies the individual indices of those 3 paralogGroups."""
        # base-26 encoding with letters a..z; 26 -> 'aa' etc.
        letters = []
        while nr // 26 > 0:
            letters.append(chr(97 + (nr % 26)))
            nr = nr // 26 - 1
        letters.append(chr(97 + (nr % 26)))
        return prefix + ''.join(letters[::-1])  # letters were in reverse order
    def _annotateGroupR(self, node, og, idx=0):
        """create the og attributes at the orthologGroup elements
        according to the naming schema of LOFT. ParalogGroup elements
        do not get own attributes (not possible in the xml schema),
        but propagate their sub-names for the subsequent orthologGroup
        elements."""
        if self.parser.is_ortholog_group(node):
            # if a taxid property exists, embed it into the 'id' attribute
            # instead of setting 'og'
            taxid_node = OrthoXMLQuery.getTaxidNodes(node, recursively=False)
            if len(taxid_node) > 0:
                node.set('id', '{}_{}'.format(og, taxid_node[0].get('value')))
            else:
                node.set('og', og)
            for child in list(node):
                self._annotateGroupR(child, og, idx)
        elif self.parser.is_paralog_group(node):
            # each paralogGroup deepens the LOFT id by one dotted component
            idx += 1
            nextOG = "{}.{}".format(og, self._getNextSubId(idx))
            for i, child in enumerate(list(node)):
                self._annotateGroupR(child,
                                     self._encodeParalogClusterId(nextOG, i),
                                     idx)
    def _addTaxRangeR(self, node, noUpwardLevels=False):
        """recursive method to add TaxRange property tags."""
        # NOTE(review): noUpwardLevels is only propagated to the recursive
        # calls, never acted upon in this body -- confirm intended semantics.
        if self.parser.is_ortholog_group(node) or self.parser.is_paralog_group(node) \
                or OrthoXMLQuery.is_geneRef_node(node):
            species_covered, nr_genes = self.parser.get_species_below_node(node, return_gene_total_count=True)
            species_covered = set(self.tax.map_potential_internal_speciesname_to_leaf(s) for s in species_covered)
            current_level = self.tax.mrca(species_covered)
            og_tag = '{{{}}}orthologGroup'.format(OrthoXMLQuery.ns['ns0'])
            if self.parser.is_ortholog_group(node):
                # add CompletenessScore and NrMemberGenes only once per group
                comp_score = OrthoXMLQuery.getScoreNodes(node, 'CompletenessScore')
                if len(comp_score) == 0:
                    node.append(self._createCompletnessScoreTag(current_level, species_covered))
                    node.append(self._createNrMemberGeneTag(nr_genes))
                taxrange = OrthoXMLQuery.getTaxRangeNodes(node, False)
                taxid = OrthoXMLQuery.getTaxidNodes(node, False)
                if len(taxrange) > 0:
                    # check consistency between current_level and value stored in taxrange
                    if taxrange[0].get('value') != current_level:
                        raise Exception("Inconsistent TaxRange: {} vs current_level {}"
                                        .format(taxrange[0].get('value'), current_level))
                    if len(taxid) > 0:
                        if taxid[0].get('value') != self.taxrange_2_taxid[current_level]:
                            raise Exception("Inconsitency between taxids: {} vs {}"
                                            .format(taxid[0].get('value'), self.taxrange_2_taxid[current_level]))
                    else:
                        try:
                            node.append(self._create_taxid(current_level))
                        except KeyError:
                            # no taxid known for this level; skip silently
                            pass
                else:
                    node.append(self._createTaxRangeTags(current_level))
            try:  # find the closest ancestral orthogroup if it has a TaxRange property
                parent_orthogroup = next(node.iterancestors(og_tag))
                parent_levels = {z.get('value')
                                 for z in OrthoXMLQuery.getTaxRangeNodes(parent_orthogroup, False)}
            except StopIteration:  # couldn't find a parent with a TaxRange property; no extra annotation possible
                parent_levels = set([])
            if len(parent_levels) > 0:
                most_recent_parent_level = self.tax.mostSpecific(parent_levels)
                # Ortholog Node - append missing tax range(s) as property tags under the current node
                if self.parser.is_ortholog_group(node):
                    self._insertOGs_between(node.getparent(), node, current_level, most_recent_parent_level, nr_genes,
                                            species_covered, include_self=False)
                # Paralog Node - insert ortholog node between self and parent; add missing tax range(s) to new parent
                elif self.parser.is_paralog_group(node):
                    if self.tax.levels_between(most_recent_parent_level, current_level) > 1:
                        self._insertOGs_between(node.getparent(), node, current_level, most_recent_parent_level,
                                                nr_genes, species_covered, include_self=False)
                # GeneRef Node - insert ortholog node between self and parent; add all tax range(s) to new parent
                else:
                    self._insertOGs_between(node.getparent(), node, current_level, most_recent_parent_level,
                                            nr_genes, species_covered, include_self=True)
                    return
        for child in node:
            self._addTaxRangeR(child, noUpwardLevels)
    def _insertOGs_between(self, parent, child, specificLev, beforeLev, nr_genes, covered_species, include_self=True):
        """Wrap `child` in a chain of new orthologGroup elements, one per
        taxonomy level from `specificLev` up to (excluding) `beforeLev`,
        and re-attach the chain at child's original position in `parent`."""
        pos = parent.index(child)
        if include_self:
            child = self._insert_one_OG(child, specificLev, covered_species, nr_genes)
        for lev in self.tax.iterParents(specificLev, stopBefore=beforeLev):
            child = self._insert_one_OG(child, lev, covered_species=covered_species, nr_genes=nr_genes)
        parent.insert(pos, child)
    def _insert_one_OG(self, child, level, covered_species, nr_genes):
        """Create one orthologGroup element annotated for `level` and make
        `child` its only subelement; return the new element."""
        el = etree.Element('{{{ns0}}}orthologGroup'.format(**self.parser.ns))
        el.append(self._createCompletnessScoreTag(level, covered_species))
        el.extend(self._createTaxRangeTags(level))
        el.append(self._createNrMemberGeneTag(nr_genes))
        el.append(child)
        return el
    def _createTaxRangeTags(self, lev, **kwargs):
        """Return a list with a TaxRange property element for `lev`, plus
        a taxid property when a mapping for the level is known."""
        tags = [self._create_tax_range(lev, **kwargs)]
        try:
            tags.append(self._create_taxid(lev))
        except KeyError:
            # no taxid known for this level
            pass
        return tags
    def _create_tax_range(self, lev, **kwargs):
        """Return a <property name="TaxRange" value=lev> element."""
        return etree.Element('{{{ns0}}}property'.format(**self.parser.ns),
                             attrib=dict(name='TaxRange', value=lev, **kwargs))
    def _create_taxid(self, lev):
        """Return a <property name="taxid"> element for `lev`; raises
        KeyError when the level is not in taxrange_2_taxid."""
        return etree.Element('{{{ns0}}}property'.format(**self.parser.ns),
                             attrib=dict(name='taxid',
                                         value=self.taxrange_2_taxid[lev]))
    def _createNrMemberGeneTag(self, nr_genes):
        """Return a <property name="NrMemberGenes"> element."""
        return etree.Element('{{{ns0}}}property'.format(**self.parser.ns),
                             attrib={"name": "NrMemberGenes", "value": str(nr_genes)})
    def completenessScore(self, level, covered_species):
        """Fraction of the species below `level` that are actually covered,
        formatted with three decimals."""
        return "{:.3f}".format(len(covered_species) / len(self.tax.descendents[level]))
    def _createCompletnessScoreTag(self, level, covered_species):
        """Return a <score id="CompletenessScore"> element for `level`."""
        el = etree.Element('{{{ns0}}}score'.format(**self.parser.ns),
                           attrib={"id": "CompletenessScore",
                                   "value": self.completenessScore(level, covered_species)})
        return el
    def annotateMissingTaxRanges(self, tax, propagate_top=False, verbosity=0):
        """This function adds left-out taxrange property elements to
        the orthologGroup elements in the xml. It will add all the levels
        defined in the 'tax'-Taxonomy between the parents most specific
        level and the current nodes level. If no parent exists, all
        tax-levels above the current one are used."""
        self.tax = tax
        top_level_groups = self.parser.getToplevelGroups()
        if PROGRESSBAR and verbosity > 0:
            pbar = setup_progressbar(
                'Adding missing taxonomy annotation: ',
                len(top_level_groups)
            )
            pbar.start()
        for i, fam in enumerate(top_level_groups, start=1):
            self._addTaxRangeR(fam, noUpwardLevels=not propagate_top)
            if PROGRESSBAR and verbosity > 0:
                pbar.update(i)
        if PROGRESSBAR and verbosity > 0:
            pbar.finish()
        # taxonomy is only valid for the duration of this call
        del self.tax
    def annotateDoc(self):
        """apply the LOFT naming schema to all the orthologGroups."""
        for i, fam in enumerate(self.parser.getToplevelGroups()):
            # fresh duplication counters per family; fall back to the
            # enumeration index when the family has no 'id' attribute
            self.dupCnt = list()
            self._annotateGroupR(fam, fam.get('id', str(i)))
    def annotateSingletons(self, verbosity=0):
        """Any input genes that aren't assigned to ortholog groups are
        singletons, which are added to the xml as extra ortholog groups"""
        if PROGRESSBAR and verbosity > 0:
            pbar = setup_progressbar('Adding singletons: ', 1)
            pbar.start()
        highest_group = max(self.parser.getToplevelGroups(),
                            key=lambda x: int(
                                x.get('id')))  # TODO: If top-level orthologNodes have no id field this errors out
        input_genes = set(n.get('id') for n in  # Maybe add code to enumerate OG nodes if ids are missing?
                          OrthoXMLQuery.getInputGenes(self.parser.root))
        grouped_genes = set(n.get('id') for n in
                            OrthoXMLQuery.getGroupedGenes(self.parser.root))
        singletons = input_genes - grouped_genes
        groups_node = OrthoXMLQuery.getSubNodes('groups', self.parser.root)[0]
        # new family ids continue after the current highest group id
        fam_num = int(highest_group.get('id')) + 1
        singleton_families = set()
        if PROGRESSBAR and verbosity > 0:
            pbar.maxval = len(singletons)
        for i, gene in enumerate(sorted(singletons), start=1):
            singleton_families.add(str(fam_num))
            species = self.parser.mapGeneToSpecies(gene)
            new_node = etree.Element('{{{ns0}}}orthologGroup'.format(
                **self.parser.ns),
                id=str(fam_num))
            new_node.extend(self._createTaxRangeTags(species))
            new_node.append(etree.Element('{{{ns0}}}geneRef'.format(
                **self.parser.ns),
                id=gene))
            groups_node.append(new_node)
            fam_num += 1
            if PROGRESSBAR and verbosity > 0:
                pbar.update(i)
        self.parser.singletons = singleton_families
        if PROGRESSBAR and verbosity > 0:
            pbar.finish()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=unused-import
"""MobileNet v1 models for Keras.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and
different width factors. This allows different width models to reduce
the number of multiply-adds and thereby
reduce inference cost on mobile devices.
MobileNets support any input size greater than 32 x 32, with larger image sizes
offering better performance.
The number of parameters and number of multiply-adds
can be modified by using the `alpha` parameter,
which increases/decreases the number of filters in each layer.
By altering the image size and `alpha` parameter,
all 16 models from the paper can be built, with ImageNet weights provided.
The paper demonstrates the performance of MobileNets using `alpha` values of
1.0 (also called 100 % MobileNet), 0.75, 0.5 and 0.25.
For each of these `alpha` values, weights for 4 different input image sizes
are provided (224, 192, 160, 128).
The following table describes the size and accuracy of the 100% MobileNet
on size 224 x 224:
----------------------------------------------------------------------------
Width Multiplier (alpha) | ImageNet Acc | Multiply-Adds (M) | Params (M)
----------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 0.75 MobileNet-224 | 68.4 % | 325 | 2.6 |
| 0.50 MobileNet-224 | 63.7 % | 149 | 1.3 |
| 0.25 MobileNet-224 | 50.6 % | 41 | 0.5 |
----------------------------------------------------------------------------
The following table describes the performance of
the 100 % MobileNet on various input sizes:
------------------------------------------------------------------------
Resolution | ImageNet Acc | Multiply-Adds (M) | Params (M)
------------------------------------------------------------------------
| 1.0 MobileNet-224 | 70.6 % | 529 | 4.2 |
| 1.0 MobileNet-192 | 69.1 % | 529 | 4.2 |
| 1.0 MobileNet-160 | 67.2 % | 529 | 4.2 |
| 1.0 MobileNet-128 | 64.4 % | 529 | 4.2 |
------------------------------------------------------------------------
The weights for all 16 models are obtained and translated
from TensorFlow checkpoints found at
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md
# Reference
- [MobileNets: Efficient Convolutional Neural Networks for
    Mobile Vision Applications](https://arxiv.org/pdf/1704.04861.pdf)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras.applications.imagenet_utils import decode_predictions
from tensorflow.python.keras.layers import Activation
from tensorflow.python.keras.layers import BatchNormalization
from tensorflow.python.keras.layers import Conv2D
from tensorflow.python.keras.layers import DepthwiseConv2D
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras.layers import Input
from tensorflow.python.keras.layers import ReLU
from tensorflow.python.keras.layers import Reshape
from tensorflow.python.keras.layers import ZeroPadding2D
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
BASE_WEIGHT_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.6/'
@tf_export('keras.applications.mobilenet.preprocess_input')
def preprocess_input(x):
  """Preprocess a batch of images for MobileNet.

  Delegates to `imagenet_utils.preprocess_input` with `mode='tf'`.

  Arguments:
      x: a 4D numpy array consists of RGB values within [0, 255].

  Returns:
      Preprocessed array.
  """
  return imagenet_utils.preprocess_input(x, mode='tf')
@tf_export('keras.applications.MobileNet',
           'keras.applications.mobilenet.MobileNet')
def MobileNet(input_shape=None,
              alpha=1.0,
              depth_multiplier=1,
              dropout=1e-3,
              include_top=True,
              weights='imagenet',
              input_tensor=None,
              pooling=None,
              classes=1000):
  """Instantiates the MobileNet architecture.

  Arguments:
      input_shape: optional shape tuple, only to be specified
          if `include_top` is False (otherwise the input shape
          has to be `(224, 224, 3)` (with `channels_last` data format)
          or (3, 224, 224) (with `channels_first` data format).
          It should have exactly 3 inputs channels,
          and width and height should be no smaller than 32.
          E.g. `(200, 200, 3)` would be one valid value.
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number
              of filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number
              of filters in each layer.
          - If `alpha` = 1, default number of filters from the paper
              are used at each layer.
      depth_multiplier: depth multiplier for depthwise convolution
          (also called the resolution multiplier)
      dropout: dropout rate
      include_top: whether to include the fully-connected
          layer at the top of the network.
      weights: one of `None` (random initialization),
          'imagenet' (pre-training on ImageNet),
          or the path to the weights file to be loaded.
      input_tensor: optional Keras tensor (i.e. output of
          `layers.Input()`)
          to use as image input for the model.
      pooling: Optional pooling mode for feature extraction
          when `include_top` is `False`.
          - `None` means that the output of the model
              will be the 4D tensor output of the
              last convolutional layer.
          - `avg` means that global average pooling
              will be applied to the output of the
              last convolutional layer, and thus
              the output of the model will be a
              2D tensor.
          - `max` means that global max pooling will
              be applied.
      classes: optional number of classes to classify images
          into, only to be specified if `include_top` is True, and
          if no `weights` argument is specified.

  Returns:
      A Keras model instance.

  Raises:
      ValueError: in case of invalid argument for `weights`,
          or invalid input shape.
      RuntimeError: If attempting to run this model with a
          backend that does not support separable convolutions.
  """
  # `weights` must be 'imagenet', None, or an existing weight-file path.
  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as ImageNet with `include_top` '
                     'as true, `classes` should be 1000')

  # Determine proper input shape and default size.
  # Pretrained weights exist only for square inputs of size 128/160/192/224;
  # any other requested shape falls back to the 224 default size.
  if input_shape is None:
    default_size = 224
  else:
    if K.image_data_format() == 'channels_first':
      rows = input_shape[1]
      cols = input_shape[2]
    else:
      rows = input_shape[0]
      cols = input_shape[1]

    if rows == cols and rows in [128, 160, 192, 224]:
      default_size = rows
    else:
      default_size = 224

  input_shape = _obtain_input_shape(
      input_shape,
      default_size=default_size,
      min_size=32,
      data_format=K.image_data_format(),
      require_flatten=include_top,
      weights=weights)

  if K.image_data_format() == 'channels_last':
    row_axis, col_axis = (0, 1)
  else:
    row_axis, col_axis = (1, 2)
  rows = input_shape[row_axis]
  cols = input_shape[col_axis]

  # Pretrained imagenet weights only exist for a restricted set of
  # (depth_multiplier, alpha, input size) combinations; validate them here.
  if weights == 'imagenet':
    if depth_multiplier != 1:
      raise ValueError('If imagenet weights are being loaded, '
                       'depth multiplier must be 1')

    if alpha not in [0.25, 0.50, 0.75, 1.0]:
      raise ValueError('If imagenet weights are being loaded, '
                       'alpha can be one of'
                       '`0.25`, `0.50`, `0.75` or `1.0` only.')

    if rows != cols or rows not in [128, 160, 192, 224]:
      if rows is None:
        rows = 224
        logging.warning('MobileNet shape is undefined.'
                        ' Weights for input shape (224, 224) will be loaded.')
      else:
        raise ValueError('If imagenet weights are being loaded, '
                         'input must have a static square shape (one of '
                         '(128, 128), (160, 160), (192, 192), or (224, 224)).'
                         ' Input shape provided = %s' % (input_shape,))

  # MobileNet weights are only published for channels_last; temporarily
  # switch the backend data format and restore it before returning.
  if K.image_data_format() != 'channels_last':
    logging.warning('The MobileNet family of models is only available '
                    'for the input data format "channels_last" '
                    '(width, height, channels). '
                    'However your settings specify the default '
                    'data format "channels_first" (channels, width, height).'
                    ' You should set `image_data_format="channels_last"` '
                    'in your Keras config located at ~/.keras/keras.json. '
                    'The model being returned right now will expect inputs '
                    'to follow the "channels_last" data format.')
    K.set_image_data_format('channels_last')
    old_data_format = 'channels_first'
  else:
    old_data_format = None

  if input_tensor is None:
    img_input = Input(shape=input_shape)
  else:
    if not K.is_keras_tensor(input_tensor):
      img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  # Backbone: a regular 3x3 conv stem followed by 13 depthwise-separable
  # blocks; the strided blocks (2, 4, 6, 12) halve the spatial resolution.
  x = _conv_block(img_input, 32, alpha, strides=(2, 2))
  x = _depthwise_conv_block(x, 64, alpha, depth_multiplier, block_id=1)
  x = _depthwise_conv_block(
      x, 128, alpha, depth_multiplier, strides=(2, 2), block_id=2)
  x = _depthwise_conv_block(x, 128, alpha, depth_multiplier, block_id=3)
  x = _depthwise_conv_block(
      x, 256, alpha, depth_multiplier, strides=(2, 2), block_id=4)
  x = _depthwise_conv_block(x, 256, alpha, depth_multiplier, block_id=5)
  x = _depthwise_conv_block(
      x, 512, alpha, depth_multiplier, strides=(2, 2), block_id=6)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=7)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=8)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=9)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=10)
  x = _depthwise_conv_block(x, 512, alpha, depth_multiplier, block_id=11)
  x = _depthwise_conv_block(
      x, 1024, alpha, depth_multiplier, strides=(2, 2), block_id=12)
  x = _depthwise_conv_block(x, 1024, alpha, depth_multiplier, block_id=13)

  if include_top:
    # Classification head: global pool -> dropout -> 1x1 conv -> softmax.
    if K.image_data_format() == 'channels_first':
      shape = (int(1024 * alpha), 1, 1)
    else:
      shape = (1, 1, int(1024 * alpha))

    x = GlobalAveragePooling2D()(x)
    x = Reshape(shape, name='reshape_1')(x)
    x = Dropout(dropout, name='dropout')(x)
    x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
    x = Activation('softmax', name='act_softmax')(x)
    x = Reshape((classes,), name='reshape_2')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model.
  model = Model(inputs, x, name='mobilenet_%0.2f_%s' % (alpha, rows))

  # Load weights. The published filenames encode alpha ('1_0', '7_5', ...)
  # and the input resolution.
  if weights == 'imagenet':
    if K.image_data_format() == 'channels_first':
      raise ValueError('Weights for "channels_first" format '
                       'are not available.')
    if alpha == 1.0:
      alpha_text = '1_0'
    elif alpha == 0.75:
      alpha_text = '7_5'
    elif alpha == 0.50:
      alpha_text = '5_0'
    else:
      alpha_text = '2_5'

    if include_top:
      model_name = 'mobilenet_%s_%d_tf.h5' % (alpha_text, rows)
      weigh_path = BASE_WEIGHT_PATH + model_name
      weights_path = get_file(model_name, weigh_path, cache_subdir='models')
    else:
      model_name = 'mobilenet_%s_%d_tf_no_top.h5' % (alpha_text, rows)
      weigh_path = BASE_WEIGHT_PATH + model_name
      weights_path = get_file(model_name, weigh_path, cache_subdir='models')
    model.load_weights(weights_path)
  elif weights is not None:
    model.load_weights(weights)

  if old_data_format:
    # Restore the data format that was temporarily overridden above.
    K.set_image_data_format(old_data_format)
  return model
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
  """Adds an initial convolution layer (with batch normalization and relu6).

  The layer sequence is: zero-pad -> conv -> batch norm -> ReLU6, with the
  filter count scaled by the width multiplier `alpha`.

  Arguments:
      inputs: Input tensor of shape `(rows, cols, 3)`
          (with `channels_last` data format) or
          (3, rows, cols) (with `channels_first` data format).
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution),
          before scaling by `alpha`.
      alpha: width multiplier; the effective filter count is
          `int(filters * alpha)`.
      kernel: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width
          and height.

  Returns:
      Output tensor of block.
  """
  # BatchNormalization normalizes over the channel axis, whose position
  # depends on the backend data format.
  if K.image_data_format() == 'channels_first':
    bn_axis = 1
  else:
    bn_axis = -1
  scaled_filters = int(filters * alpha)

  padded = ZeroPadding2D(padding=(1, 1), name='conv1_pad')(inputs)
  # padding='valid' because the explicit ZeroPadding2D above already
  # provides the 1-pixel border.
  conv = Conv2D(
      scaled_filters,
      kernel,
      padding='valid',
      use_bias=False,
      strides=strides,
      name='conv1')(padded)
  normalized = BatchNormalization(axis=bn_axis, name='conv1_bn')(conv)
  return ReLU(6, name='conv1_relu')(normalized)
def _depthwise_conv_block(inputs,
                          pointwise_conv_filters,
                          alpha,
                          depth_multiplier=1,
                          strides=(1, 1),
                          block_id=1):
  """Adds a depthwise convolution block.

  A depthwise convolution block consists of a depthwise conv,
  batch normalization, relu6, pointwise convolution,
  batch normalization and relu6 activation.

  Arguments:
      inputs: Input tensor of shape `(rows, cols, channels)`
          (with `channels_last` data format) or
          (channels, rows, cols) (with `channels_first` data format).
      pointwise_conv_filters: Integer, the dimensionality of the output
          space of the pointwise convolution, before scaling by `alpha`.
      alpha: width multiplier; the effective pointwise filter count is
          `int(pointwise_conv_filters * alpha)`.
      depth_multiplier: The number of depthwise convolution output channels
          for each input channel.
      strides: An integer or tuple/list of 2 integers, specifying the
          strides of the depthwise convolution.
      block_id: Integer, a unique identification designating the block
          number (used to name the layers).

  Returns:
      Output tensor of block.
  """
  if K.image_data_format() == 'channels_first':
    bn_axis = 1
  else:
    bn_axis = -1
  out_channels = int(pointwise_conv_filters * alpha)

  # 3x3 depthwise stage: explicit pad -> depthwise conv -> BN -> ReLU6.
  features = ZeroPadding2D(
      padding=(1, 1), name='conv_pad_%d' % block_id)(inputs)
  features = DepthwiseConv2D(  # pylint: disable=not-callable
      (3, 3),
      padding='valid',
      depth_multiplier=depth_multiplier,
      strides=strides,
      use_bias=False,
      name='conv_dw_%d' % block_id)(features)
  features = BatchNormalization(
      axis=bn_axis, name='conv_dw_%d_bn' % block_id)(features)
  features = ReLU(6, name='conv_dw_%d_relu' % block_id)(features)

  # 1x1 pointwise stage mixes channels: conv -> BN -> ReLU6.
  features = Conv2D(
      out_channels, (1, 1),
      padding='same',
      use_bias=False,
      strides=(1, 1),
      name='conv_pw_%d' % block_id)(features)
  features = BatchNormalization(
      axis=bn_axis, name='conv_pw_%d_bn' % block_id)(features)
  return ReLU(6, name='conv_pw_%d_relu' % block_id)(features)
| |
import argparse
import asyncio
import base64
import json
import os
from shlex import quote as shq
from typing import Dict, List, Optional, Tuple
import kubernetes_asyncio.client
import kubernetes_asyncio.config
from batch.driver.k8s_cache import K8sCache
from ci.build import BuildConfiguration, Code
from ci.environment import KUBERNETES_SERVER_URL, STORAGE_URI
from ci.github import clone_or_fetch_script
from ci.utils import generate_token
from hailtop.utils import check_shell_output
# Docker image used for the input/output copy helper containers.
# Required: the script fails fast at import time if it is not set.
BATCH_WORKER_IMAGE = os.environ['BATCH_WORKER_IMAGE']
def populate_secret_host_path(host_path: str, secret_data: Dict[str, bytes]):
    """Materialize a Kubernetes secret as files under ``host_path``.

    Creates ``host_path`` (which must not already exist) and writes one file
    per secret entry, base64-decoding the values as they are stored in the
    Kubernetes API.

    :param host_path: directory to create and populate.
    :param secret_data: mapping of filename -> base64-encoded content, or
        None for a secret with no data (only the directory is created).
    """
    os.makedirs(host_path)
    if secret_data is not None:
        for filename, data in secret_data.items():
            # Each secret key becomes a file of that name under host_path.
            # (Previously a literal placeholder path was used, so every
            # entry clobbered the same file and the key name was lost.)
            with open(f'{host_path}/{filename}', 'wb') as f:
                f.write(base64.b64decode(data))
class LocalJob:
    """A single job to be executed locally via docker.

    Pure data holder: all execution logic lives in LocalBatchBuilder.run().
    ``_succeeded`` is None until the job has been attempted (or skipped),
    then True/False.
    """

    def __init__(
        self,
        index: int,
        image: str,
        command: List[str],
        *,
        env: Optional[Dict[str, str]] = None,
        mount_docker_socket: bool = False,
        unconfined: bool = False,
        secrets: Optional[List[Dict[str, str]]] = None,
        service_account: Optional[Dict[str, str]] = None,
        attributes: Optional[Dict[str, str]] = None,
        parents: Optional[List['LocalJob']] = None,
        input_files: Optional[List[Tuple[str, str]]] = None,
        output_files: Optional[List[Tuple[str, str]]] = None,
        **kwargs,
    ):
        # Identity and container invocation.
        self._index = index
        self._image = image
        self._command = command
        self._env = env
        # Container privileges / mounts.
        self._mount_docker_socket = mount_docker_socket
        self._unconfined = unconfined
        # Kubernetes resources to materialize for the container.
        self._secrets = secrets
        self._service_account = service_account
        # Scheduling metadata and data dependencies.
        self._attributes = attributes
        self._parents = parents
        self._input_files = input_files
        self._output_files = output_files
        # Unrecognized options are retained but not interpreted locally.
        self._kwargs = kwargs
        self._succeeded: Optional[bool] = None
async def docker_run(*args: str):
    """Run a ``docker run -d ...`` command line and wait for the container.

    :param args: the full docker command line, one argument per element;
        expected to print a container id on stdout (i.e. detached mode).
    :return: tuple of (container id, True iff the container exited with 0).
    """
    command_line = ' '.join(shq(part) for part in args)
    out, err = await check_shell_output(command_line)
    print(f'Container output: {out!r}\n' f'Container error: {err!r}')

    container_id = out.decode('ascii').strip()
    # `docker wait` blocks until the container stops, printing its exit code.
    wait_out, _ = await check_shell_output(f'docker wait {container_id}')
    exit_code = int(wait_out.decode('ascii').strip())
    return container_id, exit_code == 0
class LocalBatchBuilder:
    """Builds and runs a batch of LocalJobs locally with docker.

    Local stand-in for the Batch service: jobs run sequentially in creation
    order; inputs/outputs are copied through a helper container; Kubernetes
    service-account credentials and secrets are materialized on the host and
    bind-mounted into the job container.
    """

    def __init__(self, attributes: Dict[str, str], callback: Optional[str]):
        # `attributes` must contain 'token' (used to name the work dir).
        self._attributes = attributes
        self._callback = callback
        self._jobs: List[LocalJob] = []

    @property
    def attributes(self) -> Dict[str, str]:
        # Batch-level attributes supplied at construction time.
        return self._attributes

    @property
    def callback(self) -> Optional[str]:
        # Not used by run(); exposed for interface parity with the
        # service-backed builder.
        return self._callback

    def create_job(self, image: str, command: List[str], **kwargs):
        """Create and register a job; its index is its creation position."""
        index = len(self._jobs)
        job = LocalJob(index, image, command, **kwargs)
        self._jobs.append(job)
        return job

    async def run(self):
        """Run all jobs in order.

        For each job: skip it if any parent failed; otherwise copy declared
        input files in, run the main container, and copy declared output
        files out. A job succeeds only if all three stages succeed.
        """
        cwd = os.getcwd()
        assert cwd.startswith('/')
        batch_token = self._attributes['token']
        # Per-batch working directory; `shared` is mounted into every
        # copy container so jobs can exchange files.
        root = f'{cwd}/_/{batch_token}'
        os.makedirs(f'{root}/shared')
        prefix = f'{STORAGE_URI}/build/{batch_token}'
        for j in self._jobs:
            job_name = j._attributes.get('name')
            print(f'{j._index}: {job_name}: running...')
            # Parents always have lower indices, so they have already run.
            if j._parents:
                for p in j._parents:
                    assert p._succeeded is not None
                    if not p._succeeded:
                        print(f'{j._index}: {job_name}: SKIPPED: parent {p._index} failed')
                        j._succeeded = False
            if j._succeeded is False:
                continue
            job_root = f'{root}/{j._index}'
            os.makedirs(f'{job_root}/io')
            os.makedirs(f'{job_root}/secrets')
            # Stage 1: copy declared input files from the shared area.
            if j._input_files:
                files = []
                for src, dest in j._input_files:
                    # Remap storage URIs onto the local shared mount.
                    assert src.startswith(prefix), (prefix, src)
                    src = f'/shared{src[len(prefix):]}'
                    files.append(
                        {
                            'from': src,
                            'to': dest,
                        }
                    )
                input_cid, input_ok = await docker_run(
                    'docker',
                    'run',
                    '-d',
                    '-v',
                    f'{root}/shared:/shared',
                    '-v',
                    f'{job_root}/io:/io',
                    '--entrypoint',
                    '/usr/bin/python3',
                    BATCH_WORKER_IMAGE,
                    '-m',
                    'hailtop.aiotools.copy',
                    json.dumps(None),
                    json.dumps(files),
                )
                print(f'{j._index}: {job_name}/input: {input_cid} {"OK" if input_ok else "FAILED"}')
            else:
                input_ok = True
            # Stage 2: run the main container (only if inputs succeeded).
            if input_ok:
                mount_options = ['-v', f'{job_root}/io:/io']
                env_options = []
                if j._env:
                    for key, value in j._env.items():
                        env_options.extend(['-e', f'{key}={value}'])
                # Reboot the cache on each use. The kube client isn't
                # refreshing tokens correctly.
                # https://github.com/kubernetes-client/python/issues/741
                # Note, that is in the kubenetes-client repo, the
                # kubernetes_asyncio. I'm assuming it has the same
                # issue.
                k8s_client = kubernetes_asyncio.client.CoreV1Api()
                try:
                    k8s_cache = K8sCache(k8s_client)
                    if j._service_account:
                        # Build a kubeconfig for the job from the service
                        # account's token secret and mount it at /.kube.
                        namespace = j._service_account['namespace']
                        name = j._service_account['name']
                        sa = await k8s_cache.read_service_account(name, namespace)
                        assert len(sa.secrets) == 1
                        token_secret_name = sa.secrets[0].name
                        secret = await k8s_cache.read_secret(token_secret_name, namespace)
                        token = base64.b64decode(secret.data['token']).decode()
                        cert = secret.data['ca.crt']
                        kube_config = f'''
apiVersion: v1
clusters:
- cluster:
    certificate-authority: /.kube/ca.crt
    server: {KUBERNETES_SERVER_URL}
  name: default-cluster
contexts:
- context:
    cluster: default-cluster
    user: {namespace}-{name}
    namespace: {namespace}
  name: default-context
current-context: default-context
kind: Config
preferences: {{}}
users:
- name: {namespace}-{name}
  user:
    token: {token}
'''
                        dot_kube_dir = f'{job_root}/secrets/.kube'
                        os.makedirs(dot_kube_dir)
                        with open(f'{dot_kube_dir}/config', 'w') as f:
                            f.write(kube_config)
                        with open(f'{dot_kube_dir}/ca.crt', 'w') as f:
                            f.write(base64.b64decode(cert).decode())
                        mount_options.extend(['-v', f'{dot_kube_dir}:/.kube'])
                        env_options.extend(['-e', 'KUBECONFIG=/.kube/config'])
                    # Materialize each requested k8s secret on the host and
                    # bind-mount it at its declared mount path.
                    secrets = j._secrets
                    if secrets:
                        k8s_secrets = await asyncio.gather(
                            *[k8s_cache.read_secret(secret['name'], secret['namespace']) for secret in secrets]
                        )
                        for secret, k8s_secret in zip(secrets, k8s_secrets):
                            secret_host_path = f'{job_root}/secrets/{k8s_secret.metadata.name}'
                            populate_secret_host_path(secret_host_path, k8s_secret.data)
                            mount_options.extend(['-v', f'{secret_host_path}:{secret["mount_path"]}'])
                    if j._mount_docker_socket:
                        mount_options.extend(['-v', '/var/run/docker.sock:/var/run/docker.sock'])
                    if j._unconfined:
                        security_options = [
                            '--security-opt',
                            'seccomp=unconfined',
                            '--security-opt',
                            'apparmor=unconfined',
                        ]
                    else:
                        security_options = []
                    main_cid, main_ok = await docker_run(
                        'docker',
                        'run',
                        '-d',
                        *env_options,
                        *mount_options,
                        *security_options,
                        '--entrypoint',
                        j._command[0],
                        j._image,
                        *j._command[1:],
                    )
                    print(f'{j._index}: {job_name}/main: {main_cid} {"OK" if main_ok else "FAILED"}')
                finally:
                    # Close the per-job client created above (see the token
                    # refresh note); otherwise connections leak.
                    await k8s_client.api_client.rest_client.pool_manager.close()
            else:
                main_ok = False
                print(f'{j._index}: {job_name}/main: SKIPPED: input failed')
            # Stage 3: copy declared output files back to the shared area.
            if j._output_files:
                if main_ok:
                    files = []
                    for src, dest in j._output_files:
                        assert dest.startswith(prefix), (prefix, dest)
                        dest = f'/shared{dest[len(prefix):]}'
                        files.append(
                            {
                                'from': src,
                                'to': dest,
                            }
                        )
                    output_cid, output_ok = await docker_run(
                        'docker',
                        'run',
                        '-d',
                        '-v',
                        f'{root}/shared:/shared',
                        '-v',
                        f'{job_root}/io:/io',
                        '--entrypoint',
                        '/usr/bin/python3',
                        BATCH_WORKER_IMAGE,
                        '-m',
                        'hailtop.aiotools.copy',
                        json.dumps(None),
                        json.dumps(files),
                    )
                    print(f'{j._index}: {job_name}/output: {output_cid} {"OK" if output_ok else "FAILED"}')
                else:
                    output_ok = False
                    print(f'{j._index}: {job_name}/output: SKIPPED: main failed')
            else:
                output_ok = True
            j._succeeded = input_ok and main_ok and output_ok
class Branch(Code):
    """A specific commit on a GitHub branch, plus extra build configuration."""

    def __init__(self, owner: str, repo: str, branch: str, sha: str, extra_config: Dict[str, str]):
        self._owner = owner
        self._repo = repo
        self._branch = branch
        self._sha = sha
        self._extra_config = extra_config

    def short_str(self) -> str:
        """Compact identifier for this branch, usable in resource names."""
        return f'br-{self._owner}-{self._repo}-{self._branch}'

    def repo_url(self) -> str:
        """HTTPS URL of the GitHub repository."""
        return f'https://github.com/{self._owner}/{self._repo}'

    def config(self) -> Dict[str, str]:
        """Template variables for the build; extra config wins on key clashes."""
        settings = {
            'checkout_script': self.checkout_script(),
            'branch': self._branch,
            'repo': f'{self._owner}/{self._repo}',
            'repo_url': self.repo_url(),
            'sha': self._sha,
        }
        settings.update(self._extra_config)
        return settings

    def checkout_script(self) -> str:
        """Shell snippet that clones/fetches the repo and checks out the SHA."""
        return f'''
{clone_or_fetch_script(self.repo_url())}
git checkout {shq(self._sha)}
'''

    def repo_dir(self) -> str:
        """Jobs operate directly in the current working directory."""
        return '.'
async def main():
    """Bootstrap entry point: parse arguments, build the batch, and run it."""
    await kubernetes_asyncio.config.load_kube_config()

    parser = argparse.ArgumentParser(description='Bootstrap a Hail as a service installation.')
    parser.add_argument(
        '--extra-code-config', dest='extra_code_config', default='{}', help='Extra code config in JSON format.'
    )
    parser.add_argument(
        'branch', help='Github branch to run. It should be the same branch bootstrap.py is being run from.'
    )
    parser.add_argument('sha', help='SHA of the git commit to run. It should match the branch.')
    parser.add_argument('steps', help='The requested steps to execute.')
    args = parser.parse_args()

    # The positional `branch` argument has the shape 'owner/repo:branch'.
    pieces = args.branch.split(":")
    assert len(pieces) == 2, f'{pieces} {args.branch}'
    repo_parts = pieces[0].split("/")
    assert len(repo_parts) == 2, f'{repo_parts} {pieces[0]}'

    extra_code_config = json.loads(args.extra_code_config)
    code = Branch(repo_parts[0], repo_parts[1], pieces[1], args.sha, extra_code_config)
    requested_steps = [s.strip() for s in args.steps.split(',')]

    scope = 'deploy'
    with open('build.yaml', 'r') as f:
        config = BuildConfiguration(code, f.read(), scope, requested_step_names=requested_steps)

    batch = LocalBatchBuilder(attributes={'token': generate_token()}, callback=None)
    config.build(batch, code, scope)
    await batch.run()
# asyncio.run() is the supported entry point for running a coroutine from
# synchronous code: it creates a fresh event loop, runs main() to
# completion, and closes the loop. The previous
# get_event_loop().run_until_complete(...) pattern is deprecated for this
# use and leaves the loop open.
asyncio.run(main())
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base conductor manager functionality."""
import inspect
import threading
import eventlet
import futurist
from futurist import periodics
from futurist import rejection
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_utils import excutils
from ironic.common import context as ironic_context
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import hash_ring as hash
from ironic.common.i18n import _
from ironic.common.i18n import _LC
from ironic.common.i18n import _LE
from ironic.common.i18n import _LI
from ironic.common.i18n import _LW
from ironic.common import rpc
from ironic.common import states
from ironic.conductor import task_manager
from ironic.db import api as dbapi
from ironic import objects
# Configuration options for the conductor service, registered below under
# the [conductor] group.
conductor_opts = [
    cfg.IntOpt('workers_pool_size',
               default=100, min=3,
               help=_('The size of the workers greenthread pool. '
                      'Note that 2 threads will be reserved by the conductor '
                      'itself for handling heart beats and periodic tasks.')),
    cfg.IntOpt('heartbeat_interval',
               default=10,
               help=_('Seconds between conductor heart beats.')),
]
CONF = cfg.CONF
CONF.register_opts(conductor_opts, 'conductor')
# Module-level logger, per oslo.log convention.
LOG = log.getLogger(__name__)
class BaseConductorManager(object):
def __init__(self, host, topic):
    """Set up conductor identity and RPC notifier.

    :param host: hostname of this conductor; falls back to CONF.host
        when empty/None.
    :param topic: RPC topic this conductor serves.
    """
    super(BaseConductorManager, self).__init__()
    # Fall back to the configured hostname when none was supplied.
    self.host = host or CONF.host
    self.topic = topic
    self.notifier = rpc.get_notifier()
    # Flipped to True by init_host(); guards against double starts.
    self._started = False
def init_host(self, admin_context=None):
    """Initialize the conductor host.

    Startup sequence: load drivers, collect periodic tasks, register this
    conductor with the cluster, start periodic tasks, fail stuck DEPLOYING
    nodes, restart consoles, and start the keepalive greenthread.

    :param admin_context: the admin context to pass to periodic tasks.
    :raises: RuntimeError when conductor is already running.
    :raises: NoDriversLoaded when no drivers are enabled on the conductor.
    :raises: DriverNotFound if a driver is enabled that does not exist.
    :raises: DriverLoadError if an enabled driver cannot be loaded.
    """
    if self._started:
        raise RuntimeError(_('Attempt to start an already running '
                             'conductor manager'))
    self.dbapi = dbapi.get_instance()
    self._keepalive_evt = threading.Event()
    """Event for the keepalive thread."""
    # TODO(dtantsur): make the threshold configurable?
    # Reject new work once the pool is saturated rather than queueing it.
    rejection_func = rejection.reject_when_reached(
        CONF.conductor.workers_pool_size)
    self._executor = futurist.GreenThreadPoolExecutor(
        max_workers=CONF.conductor.workers_pool_size,
        check_and_reject=rejection_func)
    """Executor for performing tasks async."""
    self.ring_manager = hash.HashRingManager()
    """Consistent hash ring which maps drivers to conductors."""
    # NOTE(deva): this call may raise DriverLoadError or DriverNotFound
    drivers = driver_factory.drivers()
    if not drivers:
        msg = _LE("Conductor %s cannot be started because no drivers "
                  "were loaded. This could be because no drivers were "
                  "specified in 'enabled_drivers' config option.")
        LOG.error(msg, self.host)
        raise exception.NoDriversLoaded(conductor=self.host)
    # NOTE(jroll) this is passed to the dbapi, which requires a list, not
    # a generator (which keys() returns in py3)
    driver_names = list(drivers)
    # Collect driver-specific periodic tasks.
    # Conductor periodic tasks accept context argument, driver periodic
    # tasks accept this manager and context. We have to ensure that the
    # same driver interface class is not traversed twice, otherwise
    # we'll have several instances of the same task.
    LOG.debug('Collecting periodic tasks')
    self._periodic_task_callables = []
    periodic_task_classes = set()
    self._collect_periodic_tasks(self, (admin_context,))
    for driver_obj in drivers.values():
        self._collect_periodic_tasks(driver_obj, (self, admin_context))
        for iface_name in driver_obj.all_interfaces:
            iface = getattr(driver_obj, iface_name, None)
            if iface and iface.__class__ not in periodic_task_classes:
                self._collect_periodic_tasks(iface, (self, admin_context))
                periodic_task_classes.add(iface.__class__)
    # Warn if periodic tasks alone could exhaust the worker pool.
    if (len(self._periodic_task_callables) >
            CONF.conductor.workers_pool_size):
        LOG.warning(_LW('This conductor has %(tasks)d periodic tasks '
                        'enabled, but only %(workers)d task workers '
                        'allowed by [conductor]workers_pool_size option'),
                    {'tasks': len(self._periodic_task_callables),
                     'workers': CONF.conductor.workers_pool_size})
    self._periodic_tasks = periodics.PeriodicWorker(
        self._periodic_task_callables,
        executor_factory=periodics.ExistingExecutor(self._executor))
    # clear all locks held by this conductor before registering
    self.dbapi.clear_node_reservations_for_conductor(self.host)
    try:
        # Register this conductor with the cluster
        self.conductor = objects.Conductor.register(
            admin_context, self.host, driver_names)
    except exception.ConductorAlreadyRegistered:
        # This conductor was already registered and did not shut down
        # properly, so log a warning and update the record.
        LOG.warning(
            _LW("A conductor with hostname %(hostname)s "
                "was previously registered. Updating registration"),
            {'hostname': self.host})
        self.conductor = objects.Conductor.register(
            admin_context, self.host, driver_names, update_existing=True)
    # Start periodic tasks
    self._periodic_tasks_worker = self._executor.submit(
        self._periodic_tasks.start, allow_empty=True)
    self._periodic_tasks_worker.add_done_callback(
        self._on_periodic_tasks_stop)
    # NOTE(lucasagomes): If the conductor server dies abruptly
    # mid deployment (OMM Killer, power outage, etc...) we
    # can not resume the deployment even if the conductor
    # comes back online. Cleaning the reservation of the nodes
    # (dbapi.clear_node_reservations_for_conductor) is not enough to
    # unstick it, so let's gracefully fail the deployment so the node
    # can go through the steps (deleting & cleaning) to make itself
    # available again.
    filters = {'reserved': False,
               'provision_state': states.DEPLOYING}
    last_error = (_("The deployment can't be resumed by conductor "
                    "%s. Moving to fail state.") % self.host)
    self._fail_if_in_state(ironic_context.get_admin_context(), filters,
                           states.DEPLOYING, 'provision_updated_at',
                           last_error=last_error)
    # Start consoles if it set enabled in a greenthread.
    try:
        self._spawn_worker(self._start_consoles,
                           ironic_context.get_admin_context())
    except exception.NoFreeConductorWorker:
        LOG.warning(_LW('Failed to start worker for restarting consoles.'))
    # Spawn a dedicated greenthread for the keepalive
    try:
        self._spawn_worker(self._conductor_service_record_keepalive)
        LOG.info(_LI('Successfully started conductor with hostname '
                     '%(hostname)s.'),
                 {'hostname': self.host})
    except exception.NoFreeConductorWorker:
        with excutils.save_and_reraise_exception():
            LOG.critical(_LC('Failed to start keepalive'))
            self.del_host()
    self._started = True
def del_host(self, deregister=True):
    """Shut down this conductor.

    Stops the keepalive, optionally unregisters the conductor from the
    cluster, then drains periodic tasks and the worker pool.

    :param deregister: whether to remove this conductor's record from the
        cluster; pass False to keep the record (e.g. quick restart).
    """
    # Conductor deregistration fails if called on non-initialized
    # conductor (e.g. when rpc server is unreachable).
    if not hasattr(self, 'conductor'):
        return
    # Stop the keepalive greenthread first so it does not keep touching
    # the conductor record while we shut down.
    self._keepalive_evt.set()
    if deregister:
        try:
            # Inform the cluster that this conductor is shutting down.
            # Note that rebalancing will not occur immediately, but when
            # the periodic sync takes place.
            self.conductor.unregister()
            LOG.info(_LI('Successfully stopped conductor with hostname '
                         '%(hostname)s.'),
                     {'hostname': self.host})
        except exception.ConductorNotFound:
            # Already gone; nothing to unregister.
            pass
    else:
        LOG.info(_LI('Not deregistering conductor with hostname '
                     '%(hostname)s.'),
                 {'hostname': self.host})
    # Waiting here to give workers the chance to finish. This has the
    # benefit of releasing locks workers placed on nodes, as well as
    # having work complete normally.
    self._periodic_tasks.stop()
    self._periodic_tasks.wait()
    self._executor.shutdown(wait=True)
    self._started = False
def _collect_periodic_tasks(self, obj, args):
    """Collect periodic tasks from a given object.

    Appends a (callable, args, kwargs) tuple to
    ``self._periodic_task_callables`` for every periodic method found
    on ``obj``.

    :param obj: object containing periodic tasks as methods
    :param args: tuple with arguments to pass to every task
    """
    for attr_name, attr in inspect.getmembers(obj):
        if not periodics.is_periodic(attr):
            continue
        LOG.debug('Found periodic task %(owner)s.%(member)s',
                  {'owner': obj.__class__.__name__,
                   'member': attr_name})
        self._periodic_task_callables.append((attr, args, {}))
def _on_periodic_tasks_stop(self, fut):
    """Done-callback for the periodic-tasks worker future.

    Logs at critical level if the worker stopped because of an exception,
    otherwise logs a normal shutdown.

    :param fut: the finished future running the periodic tasks.
    """
    try:
        # result() re-raises any exception raised by the worker.
        fut.result()
    except Exception as exc:
        LOG.critical(_LC('Periodic tasks worker has failed: %s'), exc)
    else:
        LOG.info(_LI('Successfully shut down periodic tasks'))
def iter_nodes(self, fields=None, **kwargs):
    """Iterate over nodes mapped to this conductor.

    Requests the node set from the database and filters out nodes that
    are not mapped to this conductor (per the consistent hash ring).

    Yields tuples (node_uuid, driver, ...) where ... is derived from
    fields argument, e.g.: fields=None means yielding ('uuid', 'driver'),
    fields=['foo'] means yielding ('uuid', 'driver', 'foo').

    :param fields: list of fields to fetch in addition to uuid and driver
    :param kwargs: additional arguments to pass to dbapi when looking for
                   nodes
    :return: generator yielding tuples of requested fields
    """
    # uuid and driver always come first; extra fields are appended.
    requested_columns = ['uuid', 'driver'] + list(fields or ())
    rows = self.dbapi.get_nodeinfo_list(columns=requested_columns, **kwargs)
    for row in rows:
        node_uuid, driver = row[0], row[1]
        if self._mapped_to_this_conductor(node_uuid, driver):
            yield row
def _spawn_worker(self, func, *args, **kwargs):
    """Create a greenthread to run func(*args, **kwargs).

    Spawns a greenthread if there are free slots in pool, otherwise raises
    exception. Execution control returns immediately to the caller.

    :returns: Future object.
    :raises: NoFreeConductorWorker if worker pool is currently full.
    """
    try:
        return self._executor.submit(func, *args, **kwargs)
    except futurist.RejectedSubmission:
        # The pool's rejection policy fired (pool saturated); translate
        # to the ironic-specific exception callers expect.
        raise exception.NoFreeConductorWorker()
def _conductor_service_record_keepalive(self):
    """Periodically touch this conductor's DB record until shutdown.

    Runs in a dedicated greenthread; del_host() sets _keepalive_evt to
    stop the loop. Transient DB connection errors are logged and retried
    on the next heartbeat rather than killing the thread.
    """
    while not self._keepalive_evt.is_set():
        try:
            self.conductor.touch()
        except db_exception.DBConnectionError:
            LOG.warning(_LW('Conductor could not connect to database '
                            'while heartbeating.'))
        # wait() doubles as the heartbeat sleep and the stop signal.
        self._keepalive_evt.wait(CONF.conductor.heartbeat_interval)
def _mapped_to_this_conductor(self, node_uuid, driver):
"""Check that node is mapped to this conductor.
Note that because mappings are eventually consistent, it is possible
for two conductors to simultaneously believe that a node is mapped to
them. Any operation that depends on exclusive control of a node should
take out a lock.
"""
try:
ring = self.ring_manager[driver]
except exception.DriverNotFound:
return False
return self.host in ring.get_hosts(node_uuid)
    def _fail_if_in_state(self, context, filters, provision_state,
                          sort_key, callback_method=None,
                          err_handler=None, last_error=None,
                          keep_target_state=False):
        """Fail nodes that are in specified state.

        Retrieves nodes that satisfy the criteria in 'filters'.
        If any of these nodes is in 'provision_state', it has failed
        in whatever provisioning activity it was currently doing.
        That failure is processed here.

        :param: context: request context
        :param: filters: criteria (as a dictionary) to get the desired
                         list of nodes that satisfy the filter constraints.
                         For example, if filters['provisioned_before'] = 60,
                         this would process nodes whose provision_updated_at
                         field value was 60 or more seconds before 'now'.
        :param: provision_state: provision_state that the node is in,
                                 for the provisioning activity to have failed.
        :param: sort_key: the nodes are sorted based on this key.
        :param: callback_method: the callback method to be invoked in a
                                 spawned thread, for a failed node. This
                                 method must take a :class:`TaskManager` as
                                 the first (and only required) parameter.
        :param: err_handler: for a failed node, the error handler to invoke
                             if an error occurs trying to spawn an thread
                             to do the callback_method.
        :param: last_error: the error message to be updated in node.last_error
        :param: keep_target_state: if True, a failed node will keep the same
                                   target provision state it had before the
                                   failure. Otherwise, the node's target
                                   provision state will be determined by the
                                   fsm.
        """
        node_iter = self.iter_nodes(filters=filters,
                                    sort_key=sort_key,
                                    sort_dir='asc')
        workers_count = 0
        for node_uuid, driver in node_iter:
            try:
                with task_manager.acquire(context, node_uuid,
                                          purpose='node state check') as task:
                    # Re-check under the lock: the node may have changed
                    # between iter_nodes() listing it and us acquiring it.
                    if (task.node.maintenance or
                            task.node.provision_state != provision_state):
                        continue
                    target_state = (None if not keep_target_state else
                                    task.node.target_provision_state)
                    # timeout has been reached - process the event 'fail'
                    if callback_method:
                        task.process_event('fail',
                                           callback=self._spawn_worker,
                                           call_args=(callback_method, task),
                                           err_handler=err_handler,
                                           target_state=target_state)
                    else:
                        task.node.last_error = last_error
                        task.process_event('fail', target_state=target_state)
            except exception.NoFreeConductorWorker:
                # Worker pool is full; give up until the next periodic run.
                break
            except (exception.NodeLocked, exception.NodeNotFound):
                # Someone else holds the node (or it vanished); skip it.
                continue
            workers_count += 1
            if workers_count >= CONF.conductor.periodic_max_workers:
                # Cap the amount of work done in a single periodic pass.
                break
    def _start_consoles(self, context):
        """Start consoles if set enabled.

        Iterates all nodes with console_enabled=True and (re)starts their
        consoles; on failure the node is marked console_enabled=False with
        the error recorded in last_error.

        :param: context: request context
        """
        filters = {'console_enabled': True}
        node_iter = self.iter_nodes(filters=filters)
        for node_uuid, driver in node_iter:
            try:
                # Exclusive lock: the node may be modified on failure below.
                with task_manager.acquire(context, node_uuid, shared=False,
                                          purpose='start console') as task:
                    try:
                        LOG.debug('Trying to start console of node %(node)s',
                                  {'node': node_uuid})
                        task.driver.console.start_console(task)
                        LOG.info(_LI('Successfully started console of node '
                                     '%(node)s'), {'node': node_uuid})
                    except Exception as err:
                        msg = (_('Failed to start console of node %(node)s '
                                 'while starting the conductor, so changing '
                                 'the console_enabled status to False, error: '
                                 '%(err)s')
                               % {'node': node_uuid, 'err': err})
                        LOG.error(msg)
                        # If starting console failed, set node console_enabled
                        # back to False and set node's last error.
                        task.node.last_error = msg
                        task.node.console_enabled = False
                        task.node.save()
            except exception.NodeLocked:
                LOG.warning(_LW('Node %(node)s is locked while trying to '
                                'start console on conductor startup'),
                            {'node': node_uuid})
                continue
            except exception.NodeNotFound:
                LOG.warning(_LW("During starting console on conductor "
                                "startup, node %(node)s was not found"),
                            {'node': node_uuid})
                continue
            finally:
                # Yield on every iteration
                eventlet.sleep(0)
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
cmdline utility to perform cluster reconnaissance
"""
from __future__ import print_function
from eventlet.green import socket
from six import string_types
from six.moves.urllib.parse import urlparse
from swift.common.utils import (
SWIFT_CONF_FILE, md5_hash_for_file, set_swift_dir)
from swift.common.ring import Ring
from swift.common.storage_policy import POLICIES, reload_storage_policies
import eventlet
import json
import optparse
import time
import sys
import six
import os
if six.PY3:
from eventlet.green.urllib import request as urllib2
else:
from eventlet.green import urllib2
def seconds2timeunit(seconds):
    """Convert a duration in seconds to the largest convenient unit.

    :param seconds: duration in seconds
    :returns: tuple of (elapsed amount, unit name) where unit is one of
              'seconds', 'minutes', 'hours' or 'days'
    """
    value = seconds
    unit = 'seconds'
    # Promote to the next unit while the value is at least one of it.
    for divisor, next_unit in ((60.0, 'minutes'), (60.0, 'hours'),
                               (24.0, 'days')):
        if value < divisor:
            break
        value = value / divisor
        unit = next_unit
    return value, unit
def size_suffix(size):
    """Render a byte count with a short decimal (power-of-1000) suffix.

    :param size: size in bytes
    :returns: string like '999 bytes' or '1 kB' (integer division, so
              fractional parts are truncated)
    """
    suffixes = ('bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB')
    idx = 0
    while size >= 1000 and idx < len(suffixes) - 1:
        size = size // 1000
        idx += 1
    if size >= 1000:
        # Ran out of suffixes: mirror the original's final division at the
        # largest unit.
        size = size // 1000
    return "%s %s" % (size, suffixes[idx])
class Scout(object):
    """
    Obtain swift recon information
    """

    def __init__(self, recon_type, verbose=False, suppress_errors=False,
                 timeout=5):
        """
        :param recon_type: recon check to request, e.g. 'async' or 'ringmd5'
        :param verbose: if True, print every url/response pair as fetched
        :param suppress_errors: if True, do not print connection errors
        :param timeout: per-request timeout in seconds
        """
        self.recon_type = recon_type
        self.verbose = verbose
        self.suppress_errors = suppress_errors
        self.timeout = timeout

    def scout_host(self, base_url, recon_type):
        """
        Perform the actual HTTP request to obtain swift recon telemetry.

        :param base_url: the base url of the host you wish to check. str of the
                         format 'http://127.0.0.1:6200/recon/'
        :param recon_type: the swift recon check to request.
        :returns: tuple of (recon url used, response body, and status)
        """
        url = base_url + recon_type
        try:
            body = urllib2.urlopen(url, timeout=self.timeout).read()
            content = json.loads(body)
            if self.verbose:
                print("-> %s: %s" % (url, content))
            status = 200
        except urllib2.HTTPError as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = err.code
        except (urllib2.URLError, socket.timeout) as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            # -1 marks a transport-level failure (no HTTP status available).
            status = -1
        return url, content, status

    def scout(self, host):
        """
        Obtain telemetry from a host running the swift recon middleware.

        :param host: host to check
        :returns: tuple of (recon url used, response body, status, time start
                  and time end)
        """
        base_url = "http://%s:%s/recon/" % (host[0], host[1])
        ts_start = time.time()
        url, content, status = self.scout_host(base_url, self.recon_type)
        ts_end = time.time()
        return url, content, status, ts_start, ts_end

    def scout_server_type(self, host):
        """
        Obtain Server header by calling OPTIONS.

        :param host: host to check
        :returns: Server type, status
        """
        try:
            url = "http://%s:%s/" % (host[0], host[1])
            req = urllib2.Request(url)
            req.get_method = lambda: 'OPTIONS'
            # Honor the configured timeout here too, like scout_host() does.
            conn = urllib2.urlopen(req, timeout=self.timeout)
            # Message.get() works on both py2 (rfc822.Message) and py3
            # (http.client response headers); getheader() is py2-only and
            # raises AttributeError under py3.
            header = conn.info().get('Server')
            server_header = header.split('/')
            content = server_header[0]
            status = 200
        except urllib2.HTTPError as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = err.code
        except (urllib2.URLError, socket.timeout) as err:
            if not self.suppress_errors or self.verbose:
                print("-> %s: %s" % (url, err))
            content = err
            status = -1
        return url, content, status
class SwiftRecon(object):
"""
Retrieve and report cluster info from hosts running recon middleware.
"""
    def __init__(self):
        # Output/behavior flags; presumably overridden from the parsed CLI
        # options before any check runs -- TODO confirm against main().
        self.verbose = False
        self.suppress_errors = False
        # Per-request timeout (seconds) handed to each Scout.
        self.timeout = 5
        # Number of concurrent greenthreads used to scout hosts.
        self.pool_size = 30
        self.pool = eventlet.GreenPool(self.pool_size)
        # Server types that have rings/recon endpoints.
        self.check_types = ['account', 'container', 'object']
        # Default server type; checks run against this ring unless changed.
        self.server_type = 'object'
def _gen_stats(self, stats, name=None):
"""Compute various stats from a list of values."""
cstats = [x for x in stats if x is not None]
if len(cstats) > 0:
ret_dict = {'low': min(cstats), 'high': max(cstats),
'total': sum(cstats), 'reported': len(cstats),
'number_none': len(stats) - len(cstats), 'name': name}
ret_dict['average'] = \
ret_dict['total'] / float(len(cstats))
ret_dict['perc_none'] = \
ret_dict['number_none'] * 100.0 / len(stats)
else:
ret_dict = {'reported': 0}
return ret_dict
def _print_stats(self, stats):
"""
print out formatted stats to console
:param stats: dict of stats generated by _gen_stats
"""
print('[%(name)s] low: %(low)d, high: %(high)d, avg: '
'%(average).1f, total: %(total)d, '
'Failed: %(perc_none).1f%%, no_result: %(number_none)d, '
'reported: %(reported)d' % stats)
def _ptime(self, timev=None):
"""
:param timev: a unix timestamp or None
:returns: a pretty string of the current time or provided time in UTC
"""
if timev:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev))
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
def get_hosts(self, region_filter, zone_filter, swift_dir, ring_names):
"""
Get a list of hosts in the rings.
:param region_filter: Only list regions matching given filter
:param zone_filter: Only list zones matching given filter
:param swift_dir: Directory of swift config, usually /etc/swift
:param ring_names: Collection of ring names, such as
['object', 'object-2']
:returns: a set of tuples containing the ip and port of hosts
"""
rings = [Ring(swift_dir, ring_name=n) for n in ring_names]
devs = [d for r in rings for d in r.devs if d]
if region_filter is not None:
devs = [d for d in devs if d['region'] == region_filter]
if zone_filter is not None:
devs = [d for d in devs if d['zone'] == zone_filter]
return set((d['ip'], d['port']) for d in devs)
    def get_ringmd5(self, hosts, swift_dir):
        """
        Compare ring md5sum's with those on remote host

        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        :param swift_dir: The local directory with the ring files.
        """
        matches = 0
        errors = 0
        ring_names = set()
        if self.server_type == 'object':
            # Object rings may exist per storage policy (object.ring.gz,
            # object-1.ring.gz, ...), so pick up every local match.
            for ring_name in os.listdir(swift_dir):
                if ring_name.startswith('object') and \
                        ring_name.endswith('.ring.gz'):
                    ring_names.add(ring_name)
        else:
            ring_name = '%s.ring.gz' % self.server_type
            ring_names.add(ring_name)
        # Map local ring file name -> md5 of its contents.
        rings = {}
        for ring_name in ring_names:
            rings[ring_name] = md5_hash_for_file(
                os.path.join(swift_dir, ring_name))
        recon = Scout("ringmd5", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking ring md5sums" % self._ptime())
        if self.verbose:
            for ring_file, ring_sum in rings.items():
                print("-> On disk %s md5sum: %s" % (ring_file, ring_sum))
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status != 200:
                errors = errors + 1
                continue
            success = True
            for remote_ring_file, remote_ring_sum in response.items():
                remote_ring_name = os.path.basename(remote_ring_file)
                # Ignore rings for other server types the host reports.
                if not remote_ring_name.startswith(self.server_type):
                    continue
                ring_sum = rings.get(remote_ring_name, None)
                if remote_ring_sum != ring_sum:
                    success = False
                    print("!! %s (%s => %s) doesn't match on disk md5sum" % (
                        url, remote_ring_name, remote_ring_sum))
            if not success:
                errors += 1
                continue
            matches += 1
            if self.verbose:
                print("-> %s matches." % url)
        print("%s/%s hosts matched, %s error[s] while checking hosts." % (
            matches, len(hosts), errors))
        print("=" * 79)
def get_swiftconfmd5(self, hosts, printfn=print):
"""
Compare swift.conf md5sum with that on remote hosts
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
:param printfn: function to print text; defaults to print()
"""
matches = 0
errors = 0
conf_sum = md5_hash_for_file(SWIFT_CONF_FILE)
recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors,
self.timeout)
printfn("[%s] Checking swift.conf md5sum" % self._ptime())
if self.verbose:
printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response[SWIFT_CONF_FILE] != conf_sum:
printfn("!! %s (%s) doesn't match on disk md5sum" %
(url, response[SWIFT_CONF_FILE]))
else:
matches = matches + 1
if self.verbose:
printfn("-> %s matches." % url)
else:
errors = errors + 1
printfn("%s/%s hosts matched, %s error[s] while checking hosts."
% (matches, len(hosts), errors))
printfn("=" * 79)
def async_check(self, hosts):
"""
Obtain and print async pending statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
scan = {}
recon = Scout("async", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking async pendings" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response['async_pending']
stats = self._gen_stats(scan.values(), 'async_pending')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[async_pending] - No hosts returned valid data.")
print("=" * 79)
def driveaudit_check(self, hosts):
"""
Obtain and print drive audit error statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]
"""
scan = {}
recon = Scout("driveaudit", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking drive-audit errors" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
scan[url] = response['drive_audit_errors']
stats = self._gen_stats(scan.values(), 'drive_audit_errors')
if stats['reported'] > 0:
self._print_stats(stats)
else:
print("[drive_audit_errors] - No hosts returned valid data.")
print("=" * 79)
def umount_check(self, hosts):
"""
Check for and print unmounted drives
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
unmounted = {}
errors = {}
recon = Scout("unmounted", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Getting unmounted drives from %s hosts..." %
(self._ptime(), len(hosts)))
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
unmounted[url] = []
errors[url] = []
for i in response:
if not isinstance(i['mounted'], bool):
errors[url].append(i['device'])
else:
unmounted[url].append(i['device'])
for host in unmounted:
node = urlparse(host).netloc
for entry in unmounted[host]:
print("Not mounted: %s on %s" % (entry, node))
for host in errors:
node = urlparse(host).netloc
for entry in errors[host]:
print("Device errors: %s on %s" % (entry, node))
print("=" * 79)
def server_type_check(self, hosts):
"""
Check for server types on the ring
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
errors = {}
recon = Scout("server_type_check", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Validating server type '%s' on %s hosts..." %
(self._ptime(), self.server_type, len(hosts)))
for url, response, status in self.pool.imap(
recon.scout_server_type, hosts):
if status == 200:
if response != self.server_type + '-server':
errors[url] = response
print("%s/%s hosts ok, %s error[s] while checking hosts." % (
len(hosts) - len(errors), len(hosts), len(errors)))
for host in errors:
print("Invalid: %s is %s" % (host, errors[host]))
print("=" * 79)
def expirer_check(self, hosts):
"""
Obtain and print expirer statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = {'object_expiration_pass': [], 'expired_last_pass': []}
recon = Scout("expirer/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking on expirers" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
stats['object_expiration_pass'].append(
response.get('object_expiration_pass'))
stats['expired_last_pass'].append(
response.get('expired_last_pass'))
for k in stats:
if stats[k]:
computed = self._gen_stats(stats[k], name=k)
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[%s] - No hosts returned valid data." % k)
else:
print("[%s] - No hosts returned valid data." % k)
print("=" * 79)
    def replication_check(self, hosts):
        """
        Obtain and print replication statistics

        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        stats = {'replication_time': [], 'failure': [], 'success': [],
                 'attempted': []}
        recon = Scout("replication/%s" % self.server_type, self.verbose,
                      self.suppress_errors, self.timeout)
        print("[%s] Checking on replication" % self._ptime())
        # Sentinels for tracking the oldest/newest completion timestamps.
        least_recent_time = 9999999999
        least_recent_url = None
        most_recent_time = 0
        most_recent_url = None
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                # Object servers report 'object_replication_time'; account
                # and container servers report 'replication_time'.
                stats['replication_time'].append(
                    response.get('replication_time',
                                 response.get('object_replication_time', 0)))
                repl_stats = response.get('replication_stats')
                if repl_stats:
                    for stat_key in ['attempted', 'failure', 'success']:
                        stats[stat_key].append(repl_stats.get(stat_key))
                # Same server-type split for the last-completion timestamp.
                last = response.get('replication_last',
                                    response.get('object_replication_last', 0))
                if last < least_recent_time:
                    least_recent_time = last
                    least_recent_url = url
                if last > most_recent_time:
                    most_recent_time = last
                    most_recent_url = url
        for k in stats:
            if stats[k]:
                if k != 'replication_time':
                    computed = self._gen_stats(stats[k],
                                               name='replication_%s' % k)
                else:
                    computed = self._gen_stats(stats[k], name=k)
                if computed['reported'] > 0:
                    self._print_stats(computed)
                else:
                    print("[%s] - No hosts returned valid data." % k)
            else:
                print("[%s] - No hosts returned valid data." % k)
        if least_recent_url is not None:
            host = urlparse(least_recent_url).netloc
            if not least_recent_time:
                # A last-completion timestamp of 0 means never finished.
                print('Oldest completion was NEVER by %s.' % host)
            else:
                elapsed = time.time() - least_recent_time
                elapsed, elapsed_unit = seconds2timeunit(elapsed)
                print('Oldest completion was %s (%d %s ago) by %s.' % (
                    self._ptime(least_recent_time),
                    elapsed, elapsed_unit, host))
        if most_recent_url is not None:
            host = urlparse(most_recent_url).netloc
            elapsed = time.time() - most_recent_time
            elapsed, elapsed_unit = seconds2timeunit(elapsed)
            print('Most recent completion was %s (%d %s ago) by %s.' % (
                self._ptime(most_recent_time),
                elapsed, elapsed_unit, host))
        print("=" * 79)
def updater_check(self, hosts):
"""
Obtain and print updater statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
stats = []
recon = Scout("updater/%s" % self.server_type, self.verbose,
self.suppress_errors, self.timeout)
print("[%s] Checking updater times" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
if response['%s_updater_sweep' % self.server_type]:
stats.append(response['%s_updater_sweep' %
self.server_type])
if len(stats) > 0:
computed = self._gen_stats(stats, name='updater_last_sweep')
if computed['reported'] > 0:
self._print_stats(computed)
else:
print("[updater_last_sweep] - No hosts returned valid data.")
else:
print("[updater_last_sweep] - No hosts returned valid data.")
print("=" * 79)
    def auditor_check(self, hosts):
        """
        Obtain and print obj auditor statistics

        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        scan = {}
        # Recon keys are prefixed with the server type, e.g.
        # 'container_auditor_pass_completed'.
        adone = '%s_auditor_pass_completed' % self.server_type
        afail = '%s_audits_failed' % self.server_type
        apass = '%s_audits_passed' % self.server_type
        asince = '%s_audits_since' % self.server_type
        recon = Scout("auditor/%s" % self.server_type, self.verbose,
                      self.suppress_errors, self.timeout)
        print("[%s] Checking auditor stats" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                scan[url] = response
        if len(scan) < 1:
            print("Error: No hosts available")
            return
        stats = {}
        # Collect each metric across hosts, dropping missing (None) values.
        stats[adone] = [scan[i][adone] for i in scan
                        if scan[i][adone] is not None]
        stats[afail] = [scan[i][afail] for i in scan
                        if scan[i][afail] is not None]
        stats[apass] = [scan[i][apass] for i in scan
                        if scan[i][apass] is not None]
        stats[asince] = [scan[i][asince] for i in scan
                        if scan[i][asince] is not None]
        for k in stats:
            if len(stats[k]) < 1:
                print("[%s] - No hosts returned valid data." % k)
            else:
                # 'audits_since' is a timestamp, reported separately below.
                if k != asince:
                    computed = self._gen_stats(stats[k], k)
                    if computed['reported'] > 0:
                        self._print_stats(computed)
        if len(stats[asince]) >= 1:
            low = min(stats[asince])
            high = max(stats[asince])
            total = sum(stats[asince])
            average = total / len(stats[asince])
            print('[last_pass] oldest: %s, newest: %s, avg: %s' %
                  (self._ptime(low), self._ptime(high), self._ptime(average)))
        print("=" * 79)
def nested_get_value(self, key, recon_entry):
"""
Generator that yields all values for given key in a recon cache entry.
This is for use with object auditor recon cache entries. If the
object auditor has run in parallel, the recon cache will have entries
of the form: {'object_auditor_stats_ALL': { 'disk1': {..},
'disk2': {..},
'disk3': {..},
...}}
If the object auditor hasn't run in parallel, the recon cache will have
entries of the form: {'object_auditor_stats_ALL': {...}}.
The ZBF auditor doesn't run in parallel. However, if a subset of
devices is selected for auditing, the recon cache will have an entry
of the form: {'object_auditor_stats_ZBF': { 'disk1disk2..diskN': {}}
We use this generator to find all instances of a particular key in
these multi-level dictionaries.
"""
for k, v in recon_entry.items():
if isinstance(v, dict):
for value in self.nested_get_value(key, v):
yield value
if k == key:
yield v
    def object_auditor_check(self, hosts):
        """
        Obtain and print obj auditor statistics

        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        """
        all_scan = {}
        zbf_scan = {}
        atime = 'audit_time'
        bprocessed = 'bytes_processed'
        passes = 'passes'
        errors = 'errors'
        quarantined = 'quarantined'
        recon = Scout("auditor/object", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking auditor stats " % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                if response['object_auditor_stats_ALL']:
                    all_scan[url] = response['object_auditor_stats_ALL']
                if response['object_auditor_stats_ZBF']:
                    zbf_scan[url] = response['object_auditor_stats_ZBF']
        if len(all_scan) > 0:
            stats = {}
            # Sum each metric across the (possibly per-disk) nested dicts.
            stats[atime] = [sum(self.nested_get_value(atime, all_scan[i]))
                            for i in all_scan]
            stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
                                 all_scan[i])) for i in all_scan]
            stats[passes] = [sum(self.nested_get_value(passes, all_scan[i]))
                             for i in all_scan]
            stats[errors] = [sum(self.nested_get_value(errors, all_scan[i]))
                             for i in all_scan]
            stats[quarantined] = [sum(self.nested_get_value(quarantined,
                                  all_scan[i])) for i in all_scan]
            for k in stats:
                # Drop hosts that did not report this metric at all.
                if None in stats[k]:
                    stats[k] = [x for x in stats[k] if x is not None]
                if len(stats[k]) < 1:
                    print("[Auditor %s] - No hosts returned valid data." % k)
                else:
                    computed = self._gen_stats(stats[k],
                                               name='ALL_%s_last_path' % k)
                    if computed['reported'] > 0:
                        self._print_stats(computed)
                    else:
                        print("[ALL_auditor] - No hosts returned valid data.")
        else:
            print("[ALL_auditor] - No hosts returned valid data.")
        if len(zbf_scan) > 0:
            stats = {}
            # NOTE(review): no 'passes' metric is collected for ZBF here,
            # unlike the ALL section above.
            stats[atime] = [sum(self.nested_get_value(atime, zbf_scan[i]))
                            for i in zbf_scan]
            stats[bprocessed] = [sum(self.nested_get_value(bprocessed,
                                 zbf_scan[i])) for i in zbf_scan]
            stats[errors] = [sum(self.nested_get_value(errors, zbf_scan[i]))
                             for i in zbf_scan]
            stats[quarantined] = [sum(self.nested_get_value(quarantined,
                                  zbf_scan[i])) for i in zbf_scan]
            for k in stats:
                if None in stats[k]:
                    stats[k] = [x for x in stats[k] if x is not None]
                if len(stats[k]) < 1:
                    print("[Auditor %s] - No hosts returned valid data." % k)
                else:
                    computed = self._gen_stats(stats[k],
                                               name='ZBF_%s_last_path' % k)
                    if computed['reported'] > 0:
                        self._print_stats(computed)
                    else:
                        print("[ZBF_auditor] - No hosts returned valid data.")
        else:
            print("[ZBF_auditor] - No hosts returned valid data.")
        print("=" * 79)
def load_check(self, hosts):
"""
Obtain and print load average statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
load1 = {}
load5 = {}
load15 = {}
recon = Scout("load", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking load averages" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
load1[url] = response['1m']
load5[url] = response['5m']
load15[url] = response['15m']
stats = {"1m": load1, "5m": load5, "15m": load15}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='%s_load_avg' % item)
self._print_stats(computed)
else:
print("[%s_load_avg] - No hosts returned valid data." % item)
print("=" * 79)
def quarantine_check(self, hosts):
"""
Obtain and print quarantine statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
objq = {}
conq = {}
acctq = {}
stats = {}
recon = Scout("quarantined", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking quarantine" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
objq[url] = response['objects']
conq[url] = response['containers']
acctq[url] = response['accounts']
for key in response.get('policies', {}):
pkey = "objects_%s" % key
stats.setdefault(pkey, {})
stats[pkey][url] = response['policies'][key]['objects']
stats.update({"objects": objq, "containers": conq, "accounts": acctq})
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(),
name='quarantined_%s' % item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
def socket_usage(self, hosts):
"""
Obtain and print /proc/net/sockstat statistics
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
inuse4 = {}
mem = {}
inuse6 = {}
timewait = {}
orphan = {}
recon = Scout("sockstat", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking socket usage" % self._ptime())
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status == 200:
inuse4[url] = response['tcp_in_use']
mem[url] = response['tcp_mem_allocated_bytes']
inuse6[url] = response.get('tcp6_in_use', 0)
timewait[url] = response['time_wait']
orphan[url] = response['orphan']
stats = {"tcp_in_use": inuse4, "tcp_mem_allocated_bytes": mem,
"tcp6_in_use": inuse6, "time_wait": timewait,
"orphan": orphan}
for item in stats:
if len(stats[item]) > 0:
computed = self._gen_stats(stats[item].values(), item)
self._print_stats(computed)
else:
print("No hosts returned valid data.")
print("=" * 79)
    def disk_usage(self, hosts, top=0, lowest=0, human_readable=False):
        """
        Obtain and print disk usage statistics

        :param hosts: set of hosts to check. in the format of:
            set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
        :param top: number of fullest disks to list individually
        :param lowest: number of emptiest disks to list individually
        :param human_readable: use size suffixes (kB/MB/...) for the totals
        """
        stats = {}
        highs = []
        lows = []
        raw_total_used = []
        raw_total_avail = []
        # Histogram: integer used-percent -> number of disks at that percent.
        percents = {}
        # Fixed-size leaderboards, seeded with sentinel entries that any
        # real measurement will displace.
        top_percents = [(None, 0)] * top
        low_percents = [(None, 100)] * lowest
        recon = Scout("diskusage", self.verbose, self.suppress_errors,
                      self.timeout)
        print("[%s] Checking disk usage now" % self._ptime())
        for url, response, status, ts_start, ts_end in self.pool.imap(
                recon.scout, hosts):
            if status == 200:
                hostusage = []
                for entry in response:
                    if not isinstance(entry['mounted'], bool):
                        # 'mounted' carries an error string, not a state.
                        print("-> %s/%s: Error: %s" % (url, entry['device'],
                                                       entry['mounted']))
                    elif entry['mounted']:
                        used = float(entry['used']) / float(entry['size']) \
                            * 100.0
                        raw_total_used.append(entry['used'])
                        raw_total_avail.append(entry['avail'])
                        hostusage.append(round(used, 2))
                        # Insert into the top-N board, evicting the smallest.
                        for ident, oused in top_percents:
                            if oused < used:
                                top_percents.append(
                                    (url + ' ' + entry['device'], used))
                                top_percents.sort(key=lambda x: -x[1])
                                top_percents.pop()
                                break
                        # Insert into the bottom-N board, evicting the largest.
                        for ident, oused in low_percents:
                            if oused > used:
                                low_percents.append(
                                    (url + ' ' + entry['device'], used))
                                low_percents.sort(key=lambda x: x[1])
                                low_percents.pop()
                                break
                stats[url] = hostusage
        for url in stats:
            if len(stats[url]) > 0:
                # get per host hi/los for another day
                low = min(stats[url])
                high = max(stats[url])
                highs.append(high)
                lows.append(low)
                for percent in stats[url]:
                    percents[int(percent)] = percents.get(int(percent), 0) + 1
            else:
                print("-> %s: Error. No drive info available." % url)
        if len(lows) > 0:
            low = min(lows)
            high = max(highs)
            # dist graph shamelessly stolen from https://github.com/gholt/tcod
            print("Distribution Graph:")
            # Scale the widest bar to 69 characters.
            mul = 69.0 / max(percents.values())
            for percent in sorted(percents):
                print('% 3d%%%5d %s' % (percent, percents[percent],
                                        '*' * int(percents[percent] * mul)))
            raw_used = sum(raw_total_used)
            raw_avail = sum(raw_total_avail)
            raw_total = raw_used + raw_avail
            avg_used = 100.0 * raw_used / raw_total
            if human_readable:
                raw_used = size_suffix(raw_used)
                raw_avail = size_suffix(raw_avail)
                raw_total = size_suffix(raw_total)
            print("Disk usage: space used: %s of %s" % (raw_used, raw_total))
            print("Disk usage: space free: %s of %s" % (raw_avail, raw_total))
            print("Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" %
                  (low, high, avg_used))
        else:
            print("No hosts returned valid data.")
        print("=" * 79)
        if top_percents:
            print('TOP %s' % top)
            for ident, used in top_percents:
                # Sentinel entries (ident is None) are skipped.
                if ident:
                    url, device = ident.split()
                    host = urlparse(url).netloc.split(':')[0]
                    print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
        if low_percents:
            print('LOWEST %s' % lowest)
            for ident, used in low_percents:
                if ident:
                    url, device = ident.split()
                    host = urlparse(url).netloc.split(':')[0]
                    print('%.02f%% %s' % (used, '%-15s %s' % (host, device)))
def time_check(self, hosts, jitter=0.0):
"""
Check a time synchronization of hosts with current time
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
:param jitter: Maximal allowed time jitter
"""
jitter = abs(jitter)
matches = 0
errors = 0
recon = Scout("time", self.verbose, self.suppress_errors,
self.timeout)
print("[%s] Checking time-sync" % self._ptime())
for url, ts_remote, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
if (ts_remote + jitter < ts_start or ts_remote - jitter > ts_end):
diff = abs(ts_end - ts_remote)
ts_end_f = self._ptime(ts_end)
ts_remote_f = self._ptime(ts_remote)
print("!! %s current time is %s, but remote is %s, "
"differs by %.4f sec" % (
url,
ts_end_f,
ts_remote_f,
diff))
continue
matches += 1
if self.verbose:
print("-> %s matches." % url)
print("%s/%s hosts matched, %s error[s] while checking hosts." % (
matches, len(hosts), errors))
print("=" * 79)
def version_check(self, hosts):
"""
Check OS Swift version of hosts. Inform if differs.
:param hosts: set of hosts to check. in the format of:
set([('127.0.0.1', 6020), ('127.0.0.2', 6030)])
"""
versions = set()
errors = 0
print("[%s] Checking versions" % self._ptime())
recon = Scout("version", self.verbose, self.suppress_errors,
self.timeout)
for url, response, status, ts_start, ts_end in self.pool.imap(
recon.scout, hosts):
if status != 200:
errors = errors + 1
continue
versions.add(response['version'])
if self.verbose:
print("-> %s installed version %s" % (
url, response['version']))
if not len(versions):
print("No hosts returned valid data.")
elif len(versions) == 1:
print("Versions matched (%s), "
"%s error[s] while checking hosts." % (
versions.pop(), errors))
else:
print("Versions not matched (%s), "
"%s error[s] while checking hosts." % (
", ".join(sorted(versions)), errors))
print("=" * 79)
def _get_ring_names(self, policy=None):
    """
    Retrieve name of ring files.

    If no policy is passed and the server type is object,
    the ring names of all storage-policies are retrieved.

    :param policy: name or index of storage policy, only applicable
                   with server_type==object.
    :returns: list of ring names.
    """
    if self.server_type == 'object':
        # A policy matches when: its name equals `policy`, OR no filter
        # was given at all, OR the filter is a numeric index equal to the
        # policy's index, OR the filter is one of the policy's aliases.
        # Note `and` binds tighter than `or` inside the parentheses.
        ring_names = [p.ring_name for p in POLICIES if (
            p.name == policy or not policy or (
                policy.isdigit() and int(policy) == int(p) or
                (isinstance(policy, string_types)
                 and policy in p.aliases)))]
    else:
        # Account/container servers have a single ring named after the
        # server type.
        ring_names = [self.server_type]
    return ring_names
def main(self):
    """
    Retrieve and report cluster info from hosts running recon middleware.

    Parses the command line, resolves the target server types and hosts
    from the rings, then dispatches to the individual *_check methods.
    """
    print("=" * 79)
    usage = '''
    usage: %prog <server_type> [<server_type> [<server_type>]]
    [-v] [--suppress] [-a] [-r] [-u] [-d]
    [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat]
    [--human-readable]
    <server_type>\taccount|container|object
    Defaults to object server.
    ex: %prog container -l --auditor
    '''
    args = optparse.OptionParser(usage)
    args.add_option('--verbose', '-v', action="store_true",
                    help="Print verbose info")
    args.add_option('--suppress', action="store_true",
                    help="Suppress most connection related errors")
    # NOTE(review): `async` is a reserved word from Python 3.7 on, so the
    # `options.async` access below is a SyntaxError there; upstream fixes
    # this by giving the option an explicit dest. Left as-is here.
    args.add_option('--async', '-a', action="store_true",
                    help="Get async stats")
    args.add_option('--replication', '-r', action="store_true",
                    help="Get replication stats")
    args.add_option('--auditor', action="store_true",
                    help="Get auditor stats")
    args.add_option('--updater', action="store_true",
                    help="Get updater stats")
    args.add_option('--expirer', action="store_true",
                    help="Get expirer stats")
    args.add_option('--unmounted', '-u', action="store_true",
                    help="Check cluster for unmounted devices")
    args.add_option('--diskusage', '-d', action="store_true",
                    help="Get disk usage stats")
    args.add_option('--human-readable', action="store_true",
                    help="Use human readable suffix for disk usage stats")
    args.add_option('--loadstats', '-l', action="store_true",
                    help="Get cluster load average stats")
    args.add_option('--quarantined', '-q', action="store_true",
                    help="Get cluster quarantine stats")
    args.add_option('--validate-servers', action="store_true",
                    help="Validate servers on the ring")
    args.add_option('--md5', action="store_true",
                    help="Get md5sum of servers ring and compare to "
                    "local copy")
    args.add_option('--sockstat', action="store_true",
                    help="Get cluster socket usage stats")
    args.add_option('--driveaudit', action="store_true",
                    help="Get drive audit error stats")
    args.add_option('--time', '-T', action="store_true",
                    help="Check time synchronization")
    args.add_option('--jitter', type="float", default=0.0,
                    help="Maximal allowed time jitter")
    args.add_option('--swift-versions', action="store_true",
                    help="Check swift versions")
    args.add_option('--top', type='int', metavar='COUNT', default=0,
                    help='Also show the top COUNT entries in rank order.')
    args.add_option('--lowest', type='int', metavar='COUNT', default=0,
                    help='Also show the lowest COUNT entries in rank order.')
    args.add_option('--all', action="store_true",
                    help="Perform all checks. Equal to \t\t\t-arudlqT "
                    "--md5 --sockstat --auditor --updater --expirer "
                    "--driveaudit --validate-servers --swift-versions")
    args.add_option('--region', type="int",
                    help="Only query servers in specified region")
    args.add_option('--zone', '-z', type="int",
                    help="Only query servers in specified zone")
    args.add_option('--timeout', '-t', type="int", metavar="SECONDS",
                    help="Time to wait for a response from a server",
                    default=5)
    args.add_option('--swiftdir', default="/etc/swift",
                    help="Default = /etc/swift")
    args.add_option('--policy', '-p',
                    help='Only query object servers in specified '
                    'storage policy (specified as name or index).')
    options, arguments = args.parse_args()
    # No args at all, or more positional args than known server types.
    if len(sys.argv) <= 1 or len(arguments) > len(self.check_types):
        args.print_help()
        sys.exit(0)
    if arguments:
        arguments = set(arguments)
        if arguments.issubset(self.check_types):
            server_types = arguments
        else:
            print("Invalid Server Type")
            args.print_help()
            sys.exit(1)
    else:  # default
        server_types = ['object']
    swift_dir = options.swiftdir
    if set_swift_dir(swift_dir):
        reload_storage_policies()
    self.verbose = options.verbose
    self.suppress_errors = options.suppress
    self.timeout = options.timeout
    # Run the selected checks once per requested server type.
    for server_type in server_types:
        self.server_type = server_type
        ring_names = self._get_ring_names(options.policy)
        if not ring_names:
            print('Invalid Storage Policy: %s' % options.policy)
            args.print_help()
            sys.exit(0)
        hosts = self.get_hosts(options.region, options.zone,
                               swift_dir, ring_names)
        print("--> Starting reconnaissance on %s hosts (%s)" %
              (len(hosts), self.server_type))
        print("=" * 79)
        if options.all:
            # Server-type specific checks first ...
            if self.server_type == 'object':
                self.async_check(hosts)
                self.object_auditor_check(hosts)
                self.updater_check(hosts)
                self.expirer_check(hosts)
            elif self.server_type == 'container':
                self.auditor_check(hosts)
                self.updater_check(hosts)
            elif self.server_type == 'account':
                self.auditor_check(hosts)
            # ... then the checks common to every server type.
            self.replication_check(hosts)
            self.umount_check(hosts)
            self.load_check(hosts)
            self.disk_usage(hosts, options.top, options.lowest,
                            options.human_readable)
            self.get_ringmd5(hosts, swift_dir)
            self.get_swiftconfmd5(hosts)
            self.quarantine_check(hosts)
            self.socket_usage(hosts)
            self.server_type_check(hosts)
            self.driveaudit_check(hosts)
            self.time_check(hosts, options.jitter)
            self.version_check(hosts)
        else:
            if options.async:
                if self.server_type == 'object':
                    self.async_check(hosts)
                else:
                    print("Error: Can't check asyncs on non object "
                          "servers.")
                    print("=" * 79)
            if options.unmounted:
                self.umount_check(hosts)
            if options.replication:
                self.replication_check(hosts)
            if options.auditor:
                if self.server_type == 'object':
                    self.object_auditor_check(hosts)
                else:
                    self.auditor_check(hosts)
            if options.updater:
                if self.server_type == 'account':
                    print("Error: Can't check updaters on account "
                          "servers.")
                    print("=" * 79)
                else:
                    self.updater_check(hosts)
            if options.expirer:
                if self.server_type == 'object':
                    self.expirer_check(hosts)
                else:
                    print("Error: Can't check expired on non object "
                          "servers.")
                    print("=" * 79)
            if options.validate_servers:
                self.server_type_check(hosts)
            if options.loadstats:
                self.load_check(hosts)
            if options.diskusage:
                self.disk_usage(hosts, options.top, options.lowest,
                                options.human_readable)
            if options.md5:
                self.get_ringmd5(hosts, swift_dir)
                self.get_swiftconfmd5(hosts)
            if options.quarantined:
                self.quarantine_check(hosts)
            if options.sockstat:
                self.socket_usage(hosts)
            if options.driveaudit:
                self.driveaudit_check(hosts)
            if options.time:
                self.time_check(hosts, options.jitter)
            if options.swift_versions:
                self.version_check(hosts)
def main():
    """Entry point: run a SwiftRecon pass, exiting quietly on Ctrl-C."""
    try:
        SwiftRecon().main()
    except KeyboardInterrupt:
        print('\n')
| |
#!/usr/bin/env python
import numpy as np
import os, sys, netCDF4
import argparse
# Command-line interface for the mesh converter.
ap = argparse.ArgumentParser()
ap.add_argument('-m')      # mesh file
ap.add_argument('-p')      # problem number
ap.add_argument('-alpha')  # implicit solver 1
ap.add_argument('-dsp')    # absolute output 1
# Exodus mesh file
fin = ap.parse_args().m
idcase = int(ap.parse_args().p)
if idcase not in (10, 11, 102):
    # print() (not the py2 print statement) keeps this line valid on
    # both Python 2 and 3; `not in` is the idiomatic membership test.
    print('Choose a problem number in (10, 11, 102)')
    sys.exit(0)
nc = netCDF4.Dataset(fin)
dim = 2
# Define model type; parse the CLI once instead of re-parsing per option,
# and use `is None` (identity) to test for a missing argument.
opts = ap.parse_args()
dsp_hyb = 0 if opts.dsp is None else min(1, int(opts.dsp))
galpha = 0 if opts.alpha is None else min(1, int(opts.alpha))
# Time stepping: implicit-alpha runs long with a small uniform dt;
# otherwise a single implicit step plus a fine dynamic dt.
if galpha:
    t = 16; dt = 0.0025; dt_dyn = dt
else:
    t = 0.07; dt = 0.07; dt_dyn = 0.0025
nviz = 16
nviz_dyn = nviz
nviz_wave = nviz
nviz_slip = nviz
# Solver/output switches written into the Defmod header lines.
dsp = 1; dsp_str = 1; rsf = 0; bod_frc = 0; hyb = 1; t_lim = 10.; init = 0
alpha = 0.; beta = 0.00125; rfrac = 0
# Case 102 is the rate-state benchmark with a background slip rate.
if idcase == 102: rsf = 1; v_bg = 1E-12
if galpha:
    line1 = ["alpha qua 12"]
else:
    line1 = ["fault qua 12"]
line3 = np.array([t, dt, nviz, dsp]).reshape(1, 4)
line4 = np.array([dt_dyn*1E2, dt_dyn, nviz_dyn, t_lim, dsp_hyb, dsp_str,
                  bod_frc, hyb, rsf, init]).reshape(1, 10)
if rsf == 0:
    line5 = np.array([nviz_wave, nviz_slip]).reshape(1, 2)
else:
    line5 = np.array([nviz_wave, nviz_slip, v_bg]).reshape(1, 3)
line6 = np.array([alpha, beta, rfrac]).reshape(1, 3)
# node data
print('Extracting mesh...')
# Stack the x/y coordinate columns into an (n_node, 2) array.
coord = np.hstack((nc.variables['coordx'][:].
                   reshape(len(nc.variables['coordx']), 1),
                   nc.variables['coordy'][:].
                   reshape(len(nc.variables['coordy']), 1)))
qd_node = np.empty(shape=[0, 4], dtype=np.uint32)
# Material: density and P/S wave speeds -> dynamic elastic moduli.
rho = np.array([2700.])
vp = np.array([5716.])
vs = np.array([3300.])
E_dyn = rho*vs**2*(3*vp**2-4*vs**2)/(vp**2-vs**2)
nu_dyn = (vp**2-2*vs**2)/2/(vp**2-vs**2)
E = E_dyn
nu = nu_dyn
# solid viscosity; power law
visc = 1E25; r = 1.
mat = [[E[0], nu[0], visc, r, rho[0], E_dyn[0], nu_dyn[0]]]
# Gather quad connectivity and material id per element block.
mat_typ = np.empty(shape=(0, 1), dtype=np.uint32)
for i in nc.variables['eb_prop1'][:]:
    cnct = nc.variables["connect" + str(i)][:]
    n_elem = len(cnct)
    # Single reshape suffices (original flattened then reshaped back).
    cnct = cnct.reshape(n_elem, 4)
    qd_node = np.vstack((qd_node, cnct))
    mat_typ = np.vstack((mat_typ, i*np.ones((len(cnct), 1))))
print('%d nodes, %d elements' % (len(coord), len(qd_node)))
print 'Forming fault constraints...'
# Fault side set: element ids and element-local side ids on the fault.
id_tmp = nc.variables['ss_prop1'][0]
el_flt = nc.variables['elem_ss' + str(id_tmp)]
sd_flt = nc.variables['side_ss' + str(id_tmp)]
nd_flt = qd_node[el_flt[:]-1,:]
# nd_tap maps original node id -> duplicated (split) node id.
nd_tap = np.empty((0,2),dtype=np.uint32)
nd_flt_p = np.empty((0),dtype=np.uint32)   # positive-side fault node ids
crd_flt_p = np.empty((0,2),dtype=float)    # their coordinates
nd_flt_n = np.empty((0),dtype=np.uint32)   # negative-side (split) node ids
crd_flt_n = np.empty((0,2),dtype=float)
sd_flt_p = np.empty((0),dtype=np.uint32)
spair_flt = np.empty((0,2),dtype=np.uint32)
for i in range(len(el_flt)):
    el = el_flt[i]
    sd = sd_flt[i]
    nd = nd_flt[i,:]
    # Exodus quad side numbering: nd_on are the two nodes on the fault
    # edge (with their local corner indices idn), nd_off the other two.
    if sd ==1: nd_on = nd[[0,1]];idn=[0,1];nd_off=nd[[2,3]]
    elif sd ==2: nd_on = nd[[1,2]];idn=[1,2];nd_off=nd[[0,3]]
    elif sd ==3: nd_on = nd[[2,3]];idn=[2,3];nd_off=nd[[0,1]]
    elif sd ==4: nd_on = nd[[3,0]];idn=[3,0];nd_off=nd[[1,2]]
    # negative side has elements lower right than the fault
    if sum(coord[nd_on-1,1]-coord[nd_on-1,0]) > sum(coord[nd_off-1,1]-coord[nd_off-1,0]):
        for j in range(2):
            if nd_on[j] in nd_tap[:,0]:
                # Node already split earlier: reuse its duplicate id.
                nd_add = nd_tap[nd_tap[:,0]==nd_on[j],1]
                qd_node[el-1,idn[j]] = nd_add
            else:
                # Allocate a fresh duplicate id past all current ids.
                nd_add = len(coord)+len(nd_tap)+1
                qd_node[el-1,idn[j]] = nd_add
                nd_tap = np.vstack((nd_tap,[nd_on[j],nd_add]))
                nd_flt_n = np.hstack((nd_flt_n, nd_add))
                crd_flt_n = np.vstack((crd_flt_n, coord[nd_on[j]-1,:]))
    else:
        # Positive side: record nodes (once) and the edge node pair.
        for j in range(2):
            if not (nd_on[j] in nd_flt_p):
                nd_flt_p = np.hstack((nd_flt_p, nd_on[j]))
                crd_flt_p = np.vstack((crd_flt_p, coord[nd_on[j]-1,:]))
        sd_flt_p = np.hstack((sd_flt_p,sd))
        spair_flt = np.vstack((spair_flt,nd_on))
# remove split node at tip, except at free boundary y==0
crd_add=np.empty((0),dtype=bool)   # keep-flag per duplicated node
for i in range(len(nd_tap)):
    loc = qd_node==nd_tap[i,1]
    loc_flt = nd_flt_n==nd_tap[i,1]
    # A duplicate referenced by fewer than two element corners is a fault
    # tip; merge it back unless it lies on the free surface (y == 0).
    if sum(sum(loc))<2 and coord[nd_tap[i,0]-1,1] < 0.:
        qd_node[loc] = nd_tap[i,0]
        nd_flt_n[loc_flt] = nd_tap[i,0]
        crd_add=np.hstack((crd_add,False))
    else:
        # Shift ids down by the number of duplicates dropped so far so
        # that kept duplicates stay densely numbered.
        qd_node[loc] = qd_node[loc]-sum(~crd_add)
        nd_flt_n[loc_flt] = nd_flt_n[loc_flt]-sum(~crd_add)
        crd_add=np.hstack((crd_add,True))
coord = np.vstack((coord,crd_flt_n[crd_add,:]))
# Pair fault nodes
# Match positive/negative side nodes that share identical coordinates;
# ft_map rows are (index into p-side, index into n-side).
ft_map = np.array(np.array(np.all((crd_flt_p[:,None,:]\
==crd_flt_n[None,:,:]),axis=-1).nonzero()).T.tolist())
nd_flt_n = nd_flt_n[ft_map[:,1]]
nd_flt_p = nd_flt_p[ft_map[:,0]]
crd_flt_n = crd_flt_n[ft_map[:,1],:]
# NOTE(review): this indexes the freshly reordered crd_flt_n with the
# positive-side indices; it looks like it was meant to read
# crd_flt_p[ft_map[:,0],:]. Harmless only when ft_map[:,0] is already
# the identity order -- verify before changing.
crd_flt_p = crd_flt_n[ft_map[:,0],:]
crd_add = crd_add[ft_map[:,1]]
nd_flt_n = nd_flt_n[crd_add]
nd_flt_p = nd_flt_p[crd_add]
crd_flt_n = crd_flt_n[crd_add,:]
crd_flt_p = crd_flt_p[crd_add,:]
# Fault's strike and normal vectors
vec_fn = np.zeros((len(nd_flt_p), 2), dtype=float)
vec_fs = np.zeros((len(nd_flt_p), 2), dtype=float)
for i in range(len(spair_flt)):
    v1 = coord[spair_flt[i,0]-1,:]
    v2 = coord[spair_flt[i,1]-1,:]
    vec1 = v2 - v1
    # Strike vector along the edge, sign-fixed via its y component.
    vec1 = -vec1*np.sign(vec1[1])
    # Normal = strike rotated 90 degrees, oriented to positive y.
    vec2 = np.array([vec1[1], -vec1[0]])
    vec2 = vec2*np.sign(vec2[1])
    # Accumulate on both end nodes of the edge; node-averaged directions
    # emerge after the normalization below.
    row = np.squeeze(np.hstack((np.where(nd_flt_p==spair_flt[i,0]),\
np.where(nd_flt_p==spair_flt[i,1]))))
    vec_fs[row,:] += vec1
    vec_fn[row,:] += vec2
    if idcase==102:
        # Rate-state case: bias toward the flat-fault directions.
        vec_fs[row,:] += [-1.,0.]
        vec_fn[row,:] += [0., 1.]
# Normalize the accumulated vectors to unit length per node.
vec_fs /= (np.ones((2,1))*np.linalg.norm(vec_fs, axis=1)).T
vec_fn /= (np.ones((2,1))*np.linalg.norm(vec_fn, axis=1)).T
vecf = np.empty(shape=(0,6))
xfnd = np.empty(shape=(0,2))
nfnd = len(nd_flt_p)   # number of paired fault nodes
# Define frictional parameters (static friction), and add initial stress.
st_init = np.zeros((nfnd, 2))
frc = np.empty((nfnd, 1), dtype=np.uint32)
if idcase in [10, 11]:
    # Slip-weakening benchmarks (SCEC 10/11).
    fc = np.empty((nfnd, 1), dtype=float)
    fcd = np.empty((nfnd, 1), dtype=float)
    dc = np.empty((nfnd, 1), dtype=float)
    coh = np.empty((nfnd, 1), dtype=float)
    dcoh = np.empty((nfnd, 1), dtype=float)
    if idcase == 10:
        fc_stat = .76
    else:
        fc_stat = 0.57  # SCEC10 0.76, SCEC11 0.57
    for node_pos, i in zip(nd_flt_p, range(len(nd_flt_p))):
        y = coord[node_pos - 1, 1]
        dip = y*2./np.sqrt(3)
        stn = 7378*dip*1E3
        if abs(dip+12.) <= 1.5:
            # Nucleation patch: shear stress above static strength.
            sts = 2E5 - (fc_stat+.0057)*stn
            st_init[i, :] = [sts, stn]
            fc[i] = fc_stat
            fcd[i] = .448
            dc[i] = .5
            coh[i] = 2E5
            dcoh[i] = 1E-6
            frc[i] = 1
        elif dip >= -15.:
            sts = -.55*stn
            st_init[i, :] = [sts, stn]
            fc[i] = fc_stat
            fcd[i] = .448
            dc[i] = .5
            coh[i] = 2E5
            dcoh[i] = 1E-6
            frc[i] = 1
        else:
            # Deep section: huge static friction/cohesion keeps it locked.
            sts = -.55*stn
            st_init[i, :] = [sts, stn]
            fc[i] = 1E4
            fcd[i] = .448
            dc[i] = .5
            coh[i] = 1E9
            dcoh[i] = 1E-6
            frc[i] = 1
else:  # Rate-state parameters
    tau0 = 75E6; sn0 = 120E6
    a = .008*np.ones((nfnd, 1)); b0 = 0.6*np.ones((nfnd, 1)); V0 = 1E-6*np.ones((nfnd, 1))
    dtau0 = 25E6*np.ones((nfnd, 1)); b = .012*np.ones((nfnd, 1)); L = .02*np.ones((nfnd, 1))
    # np.float was deprecated and removed in NumPy 1.24; the builtin
    # float is the documented equivalent.
    W = 15.; w = 3.; da0 = .008; theta_init = np.empty((nfnd, 1), dtype=float)
    coh = np.empty((nfnd, 1), dtype=float)
    dcoh = np.ones((nfnd, 1))*1E-6
    for node_pos, i in zip(nd_flt_p, range(len(nd_flt_p))):
        x = coord[node_pos - 1, 0]
        st_init[i, :] = [tau0, -sn0]
        # Bx tapers rate-state `a` outside the central patch |x| <= W.
        if abs(x) <= W:
            Bx = 1.
            frc[i] = 1
        elif abs(x) > W and abs(x) < W+w:
            Bx = .5*(1+np.tanh(w/(abs(x)-W-w)+w/(abs(x)-W)))
            frc[i] = 1
        else:
            Bx = 0.
            frc[i] = 0
        if abs(x) < w/2:
            coh[i] = 0.
        else:
            coh[i] = 1.75E6
        a[i] = a[i]+da0*(1.-Bx)
        # State variable consistent with steady sliding at v_bg.
        theta_init[i] = L[i]/V0[i]*np.exp((a[i]*np.log(2.*np.sinh(tau0/a[i]/sn0))-b0[i]-a[i]*np.log(v_bg/V0[i]))/b[i])
# Observation info
# Observation-point coordinates per benchmark case.
if idcase==102:
    ogrid = np.array([[-12., -6.],
                      [ 12., -6.],
                      [-12., 6.],
                      [ 12., 6.],
                      [ 0., 9.],
                      [ 0., -9.]])
elif idcase in [10, 11]:
    ogrid = np.array([[-3., 0.],
                      [-2., 0.],
                      [-1., 0.],
                      [ 1., 0.],
                      [ 2., 0.],
                      [ 3., 0.],
                      [-1., -.3],
                      [-.5, -.3],
                      [ .5, -.3],
                      [ 1., -.3]])
#----------------------Boundary ID-------------------------
# Side/nodesets: 0 fault, 1 upper, 2 left, 3 lower, 4 right
#----------------------------------------------------------
bnd_el = []
# Traction and abs boundaries,id 0 preserved for fault faces
for i in nc.variables['ss_prop1'][1:]:
    els = nc.variables['elem_ss' + str(i)][:]
    sides = nc.variables['side_ss' + str(i)][:]
    bnd_el.append(np.hstack((els.reshape(len(els),1),sides.reshape(len(sides),1))))
trac_el1 = bnd_el[3]
trac_el2 = bnd_el[2]
abs_bc1 = bnd_el[0]
abs_bc2 = bnd_el[1]
# fixed nodes
bcy_nodes = nc.variables['node_ns2'][:]
bcx_nodes = nc.variables['node_ns3'][:]
# bc_typ holds per-node (x, y) freedom flags: 1 = free, 0 = fixed.
bc_typ = np.ones((len(coord),2), dtype=np.int8)
for node in bcx_nodes:
    bc_typ[node - 1, 0] = 0
for node in bcy_nodes:
    bc_typ[node - 1, 1] = 0
# traction bc
trac_val = [5E4, -5E4]
trac_bc1 = np.zeros(shape=[len(trac_el1), 4])
trac_bc2 = np.zeros(shape=[len(trac_el2), 4])
# Columns: (tx, ty, start, end); only one traction component per set.
trac_bc1[:,0] = trac_val[0]; trac_bc1[:,2] = 0.; trac_bc1[:,3] = 0.
trac_bc2[:,1] = trac_val[1]; trac_bc2[:,2] = 0.; trac_bc2[:,3] = 0.
trac_el = np.vstack((trac_el1, trac_el2))
trac_bc = np.vstack((trac_bc1, trac_bc2))
# absorbing bc
# Third column tags the absorbing direction (1 or 2) per boundary face.
abs_bc1 = np.hstack((abs_bc1, 2*np.ones((len(abs_bc1),1))))
abs_bc2 = np.hstack((abs_bc2, np.ones((len(abs_bc2),1))))
abs_bc3 = np.hstack((trac_el1, np.ones((len(trac_el1),1))))
abs_bc4 = np.hstack((trac_el2,2*np.ones((len(trac_el2),1))))
if idcase==102:
    abs_bc = np.vstack((abs_bc1, abs_bc2, abs_bc3, abs_bc4))
else:
    abs_bc = np.vstack((abs_bc1, abs_bc2, abs_bc3))
    # abs_bc4 upper bound can be free boundary for top surface
# Total length of constraint function
neqNCF = 0  # Zero non-conforming constraint equations.
nfnode = 0
neqFT = dim*nfnd  # `dim` constraint equations per fault node pair
neq = neqNCF+neqFT
print('%d NCF and %d fault constraint equations.' % (neqNCF, neqFT))
# Export to Defmod .inp file
fout = 'SCEC'+str(idcase)+'-2D'
if dsp_hyb: fout += '_dsp'
if galpha: fout += '-alpha'
fout += '.inp'
print('Write to ' + fout + '...')
# Remove any stale output so appending starts from an empty file.
if os.path.isfile(fout): os.remove(fout)
f = open(fout, 'a')
# Header line 2: problem sizes (elements, nodes, materials, equations...).
line2 = np.array([len(qd_node), len(coord), len(mat), neq,
                  nfnode, len(trac_el), len(abs_bc), nfnd, len(ogrid), neqNCF]).reshape(1, 10)
np.savetxt(f, line1, fmt='%s')
np.savetxt(f, line2, delimiter=' ', fmt='%d '*10)
np.savetxt(f, line3, delimiter=' ', fmt='%g %g %d %d')
np.savetxt(f, line4, delimiter=' ', fmt='%g %g %d %g %d %d %d %d %d %d')
if rsf==0:
    np.savetxt(f, line5, delimiter=' ', fmt='%d %d')
else:
    np.savetxt(f, line5, delimiter=' ', fmt='%d %d %g')
np.savetxt(f, line6, delimiter=' ', fmt='%g %g %g')
# Element connectivity + material id, then node coordinates + bc flags.
np.savetxt(f, np.column_stack((qd_node, mat_typ)), delimiter=' ', fmt='%d %d %d %d %d')
np.savetxt(f, np.column_stack((coord, bc_typ)), delimiter=' ', fmt='%g %g %d %d')
np.savetxt(f, mat, delimiter=' ', fmt='%g '*7)
# fault slip: strike and open
n = [2]   # each constraint equation couples two nodes
ft_neg_nodes_tap = []
t_slip = [0., 0.]; val=0.
cval1 = np.hstack((val, t_slip)).reshape(1,3)
cval2 = np.hstack((val, t_slip)).reshape(1,3)
for nd_p, nd_n, i in zip(nd_flt_p, nd_flt_n, range(len(nd_flt_p))):
    # Equal-and-opposite constraint rows in x and in y tying the
    # positive (nd_p) and negative (nd_n) split nodes together.
    vec1 = [[1, 0, nd_p],
            [-1, 0, nd_n]]
    vec2 = [[0, 1, nd_p],
            [0,-1, nd_n]]
    # 2x2 local (strike, normal) basis flattened row-wise for output.
    mat_ft = np.hstack((vec_fs[i,:].reshape(2,1),\
vec_fn[i,:].reshape(2,1)))
    mat_f = np.matrix.transpose(mat_ft).reshape(1,4)
    np.savetxt(f, n, fmt = '%d')
    np.savetxt(f, vec1, delimiter = ' ', fmt = '%g %g %d')
    np.savetxt(f, cval1, delimiter = ' ', fmt = "%1.2E %g %g")
    np.savetxt(f, n, fmt = '%d')
    np.savetxt(f, vec2, delimiter = ' ', fmt = '%g %g %d')
    np.savetxt(f, cval2, delimiter = ' ', fmt = "%1.2E %g %g")
    vecf = np.vstack((vecf,np.hstack(([[nd_p, nd_n]], mat_f))))
    xfnd = np.vstack((xfnd,coord[nd_p-1,:]))
# Write fault orientation tensor + frictional parameters
if rsf==1:
    np.savetxt(f, np.hstack((vecf, b0, V0, dtau0, a, b, L, theta_init, st_init, xfnd, frc,coh,dcoh)), delimiter = ' ',\
fmt = '%d '*2 + '%g '*4 + '%g '*7 + '%g '*2 + '%g '*2 + '%d ' + '%g '*2)
else:
    np.savetxt(f, np.hstack((vecf, fc, fcd, dc, st_init, xfnd, frc,coh,dcoh)), delimiter = ' ', \
fmt = '%d '*2 + '%g '*4 + '%g '*3 + '%g '*2 + '%g '*2 + '%d ' + '%g '*2)
# Point force/source and boundary traction/flux
np.savetxt(f, np.column_stack((trac_el, trac_bc)), delimiter=' ',
           fmt='%d %d %1.2E %1.2E %g %g')
# Observation grid
if len(ogrid) > 0:
    np.savetxt(f, ogrid, delimiter=' ', fmt='%g '*2)
# Abs boundary
np.savetxt(f, abs_bc, delimiter=' ', fmt='%d %d %d')
f.close()
print('Defmod file ' + fout + ' created')
| |
from pyomo.environ import *
def initialize_generators(model,
                          generator_names=None,
                          generator_at_bus=None):
    """Declare the generator set and the per-bus generator mapping.

    :param generator_names: iterable of generator identifiers.
    :param generator_at_bus: mapping bus -> generators located at it.
    """
    model.Generators = Set(initialize=generator_names)
    model.GeneratorsAtBus = Set(model.Buses, initialize=generator_at_bus)
def initialize_non_dispatchable_generators(model,
                                           non_dispatchable_generator_names=None,
                                           non_dispatchable_generator_at_bus=None):
    """Declare the non-dispatchable generator set and its per-bus mapping.

    :param non_dispatchable_generator_names: iterable of generator ids.
    :param non_dispatchable_generator_at_bus: mapping bus -> generators.
    """
    model.NonDispatchableGenerators = Set(initialize=non_dispatchable_generator_names)
    model.NonDispatchableGeneratorsAtBus = Set(model.Buses, initialize=non_dispatchable_generator_at_bus)
def generation_maximum_power_init(model):
    """Pin MaximumPowerAvailable to EnforceMaximumPower for every
    (generator, period) pair present in the parameter."""
    for g, t in model.EnforceMaximumPower:
        model.MaximumPowerAvailable[g, t] = model.EnforceMaximumPower[g, t]
        # Marking the variable fixed removes it from the optimization.
        model.MaximumPowerAvailable[g, t].fixed = True
def generator_bus_contribution_factor(model):
    """Fraction of each generator's output credited to each bus
    (defaults to 1.0 for every pair)."""
    model.GeneratorBusContributionFactor = Param(model.Generators, model.Buses, within=NonNegativeReals, default=1.0)
def initialize_maximum_power(model):
    """Declare the available-power variable and its enforcement parameter."""
    # maximum power output for each generator, at each time period.
    model.MaximumPowerAvailable = Var(model.Generators, model.TimePeriods, within=NonNegativeReals)
    # Values to pin MaximumPowerAvailable to (see generation_maximum_power_init).
    model.EnforceMaximumPower = Param(model.Generators, model.TimePeriods, within=NonNegativeReals)
def failure_probablity(model):
    """Per-generator failure probability (default 0.0).

    NOTE(review): the misspelled names (``probablity``) are preserved
    because external callers bind to them.
    """
    # TODO : Add validator
    model.FailureProbablity = Param(model.Generators, default=0.0)
def forced_outage(model):
    """Per-(generator, period) forced-outage flag.

    NOTE(review): declared over the cross-product
    ``model.Generators * model.TimePeriods`` (sibling functions pass the
    two sets separately) and with ``default=False`` for a Binary param --
    presumably intentional, but worth confirming.
    """
    model.GeneratorForcedOutage = Param(model.Generators * model.TimePeriods, within=Binary, default=False)
def maximum_minimum_power_output_generators(model, minimum_power_output=None, maximum_power_output=None):
    """Declare per-(generator, period) minimum and maximum power output.

    :param minimum_power_output: initializer for MinimumPowerOutput.
    :param maximum_power_output: initializer for MaximumPowerOutput.
    """
    # TODO add validation that maximum power output is greater than minimum power output
    model.MinimumPowerOutput = Param(model.Generators, model.TimePeriods, initialize=minimum_power_output, within=Reals, default=0.0)
    model.MaximumPowerOutput = Param(model.Generators, model.TimePeriods, initialize=maximum_power_output, within=NonNegativeReals, default=0.0)
def maximum_minimim_power_output_non_dispatchable_generators(model, minimum_power_output=None, maximum_power_output=None):
    """Declare min/max output bounds for non-dispatchable generators.

    NOTE(review): the misspelling ``minimim`` in the function name is
    kept because callers bind to it. The ``minimum_power_output`` /
    ``maximum_power_output`` arguments are currently unused here.
    """
    model.MinimumNonDispatchablePowerOutput = Param(model.NonDispatchableGenerators,
                                                    model.TimePeriods,
                                                    within=NonNegativeReals,
                                                    default=0.0,
                                                    mutable=True)
    model.MaximumNonDispatchablePowerOutput = Param(model.NonDispatchableGenerators,
                                                    model.TimePeriods,
                                                    within=NonNegativeReals,
                                                    default=0.0,
                                                    mutable=True)
def ramp_up_ramp_down_limits(model, ramp_up_limits=None, ramp_down_limits=None):
    """Declare per-generator nominal ramping limits.

    :param ramp_up_limits: initializer for NominalRampUpLimit.
    :param ramp_down_limits: initializer for NominalRampDownLimit.
    """
    #NRU_j
    model.NominalRampUpLimit = Param(model.Generators, within=NonNegativeReals, initialize=ramp_up_limits)
    #NRD_j
    model.NominalRampDownLimit = Param(model.Generators, within=NonNegativeReals, initialize=ramp_down_limits)
def start_up_shut_down_ramp_limits(model, start_up_ramp_limits=None, shut_down_ramp_limits=None):
    """Declare per-generator startup and shutdown ramp limits.

    :param start_up_ramp_limits: initializer for StartupRampLimit.
    :param shut_down_ramp_limits: initializer for ShutdownRampLimit.
    """
    #NSU_j
    model.StartupRampLimit = Param(model.Generators, within=NonNegativeReals, initialize=start_up_ramp_limits)
    #NSD_j
    model.ShutdownRampLimit = Param(model.Generators, within=NonNegativeReals, initialize=shut_down_ramp_limits)

# TODO scaling
def minimum_up_minimum_down_time(model, minimum_up_time=None, minimum_down_time=None):
    """Declare per-generator minimum up-time and down-time (periods).

    :param minimum_up_time: initializer for MinimumUpTime.
    :param minimum_down_time: initializer for MinimumDownTime.
    """
    #UT_j
    model.MinimumUpTime = Param(model.Generators, within=NonNegativeIntegers, default=0, initialize=minimum_up_time)
    #DT_j
    model.MinimumDownTime = Param(model.Generators, within=NonNegativeIntegers, default=0, initialize=minimum_down_time)
def _initial_time_periods_online_rule(m, g):
    """Leading periods unit g must remain online to honor its minimum
    up-time, given how long it has already been on at t=0."""
    # A unit that starts the horizon offline owes no initial on-time.
    if not value(m.UnitOnT0[g]):
        return 0
    remaining = max(0, value(m.MinimumUpTime[g]) - value(m.UnitOnT0State[g]))
    periods = round(remaining / value(m.TimePeriodLength))
    return int(min(value(m.NumTimePeriods), periods))
def _initial_time_periods_offline_rule(m, g):
    """Leading periods unit g must remain offline to honor its minimum
    down-time. m.UnitOnT0State is negative if the unit is off at t=0."""
    # A unit that starts the horizon online owes no initial off-time.
    if value(m.UnitOnT0[g]):
        return 0
    remaining = max(0, value(m.MinimumDownTime[g]) + value(m.UnitOnT0State[g]))
    periods = round(remaining / value(m.TimePeriodLength))
    return int(min(value(m.NumTimePeriods), periods))
def initial_state(model, initial_state=None,
                  initial_time_periods_online=_initial_time_periods_online_rule,
                  initial_time_periods_offline=_initial_time_periods_offline_rule
                  ):
    """Declare the t=0 state parameters of every generator.

    :param initial_state: initializer for UnitOnT0State (hours on/off at
        t=0; negative means off).
    :param initial_time_periods_online: rule for InitialTimePeriodsOnLine.
    :param initial_time_periods_offline: rule for InitialTimePeriodsOffLine.

    Bug fix: the ``initial_time_periods_online``/``_offline`` arguments
    were accepted but ignored -- the Params below always used the
    module-level rules. They now honor the arguments; the defaults are
    those same rules, so existing callers see identical behavior.
    """
    model.UnitOnT0State = Param(model.Generators, within=Reals, initialize=initial_state, mutable=True)

    def t0_unit_on_rule(m, g):
        # On at t=0 iff it has already been on for at least one time unit.
        return int(value(m.UnitOnT0State[g]) >= 1)

    #v_j(0) --> Value follows immediately from \hat{v}_j value. DON'T SET
    model.UnitOnT0 = Param(model.Generators, within=Binary, initialize=t0_unit_on_rule, mutable=True)
    #Calculated
    model.InitialTimePeriodsOnLine = Param(model.Generators, within=NonNegativeIntegers, initialize=initial_time_periods_online, mutable=True)
    #Calculated
    model.InitialTimePeriodsOffLine = Param(model.Generators, within=NonNegativeIntegers, initialize=initial_time_periods_offline, mutable=True)
def hot_start_cold_start_costs(model,
                               hot_start_costs=None,
                               cold_start_costs=None,
                               cold_start_hours=None,
                               shutdown_cost_coefficient=None):
    """Declare startup (hot/cold) and shutdown cost parameters.

    :param hot_start_costs: initializer for HotStartCost ($).
    :param cold_start_costs: initializer for ColdStartCost ($).
    :param cold_start_hours: initializer for ColdStartHours (hours off
        after which a start counts as cold).
    :param shutdown_cost_coefficient: initializer for
        ShutdownCostCoefficient ($).
    """
    ###############################################
    # startup cost parameters for each generator. #
    ###############################################
    #CSH_j
    model.ColdStartHours = Param(model.Generators, within=NonNegativeIntegers, default=0, initialize=cold_start_hours) # units are hours.
    #HSC_j
    model.HotStartCost = Param(model.Generators, within=NonNegativeReals, default=0.0, initialize=hot_start_costs) # units are $.
    #CSC_j
    model.ColdStartCost = Param(model.Generators, within=NonNegativeReals, default=0.0, initialize=cold_start_costs) # units are $.

    ##################################################################################
    # shutdown cost for each generator. in the literature, these are often set to 0. #
    ##################################################################################
    model.ShutdownCostCoefficient = Param(model.Generators, within=NonNegativeReals, default=0.0, initialize=shutdown_cost_coefficient) # units are $.
def _minimum_production_cost_fn(m, g, t):
    # Minimum production cost (needed because Piecewise constraint on ProductionCost
    # has to have lower bound of 0, so the unit can cost 0 when off -- this is added
    # back in to the objective if a unit is on
    if len(m.CostPiecewisePoints[g]) > 1:
        return m.CostPiecewiseValues[g].first() * m.FuelCost[g]
    elif len(m.CostPiecewisePoints[g]) == 1:
        # If there's only one piecewise point given, that point should be (MaxPower, MaxCost) -- i.e. the cost function is linear through (0,0),
        # so we can find the slope of the line and use that to compute the cost of running at minimum generation
        # Bug fix: MaximumPowerOutput is declared over (Generators, TimePeriods),
        # so it must be indexed [g, t] (indexing by [g] alone raises a KeyError).
        return m.MinimumPowerOutput[g, t] * (m.CostPiecewiseValues[g].first() / m.MaximumPowerOutput[g, t]) * m.FuelCost[g]
    else:
        # No piecewise data at all: fall back to the quadratic cost curve
        # evaluated at minimum output.
        return m.FuelCost[g] * \
            (m.ProductionCostA0[g] +
             m.ProductionCostA1[g] * m.MinimumPowerOutput[g, t] +
             m.ProductionCostA2[g] * m.MinimumPowerOutput[g, t]**2)
def minimum_production_cost(model, minimum_production_cost=_minimum_production_cost_fn):
    """Declare the MinimumProductionCost parameter.

    Bug fix: the ``minimum_production_cost`` argument was accepted but
    ignored (the Param always used ``_minimum_production_cost_fn``). It
    is now honored; the default is that same rule, so existing callers
    see identical behavior.
    """
    model.MinimumProductionCost = Param(model.Generators, model.TimePeriods, within=Reals, initialize=minimum_production_cost, mutable=True)
def quadratic_cost_coefficients(model, production_cost_a=None, production_cost_b=None, production_cost_c=None):
    """Declare the quadratic production-cost coefficients.

    Bug fix: the ``production_cost_a``/``_b``/``_c`` arguments were
    accepted but never wired to the Params (which therefore always fell
    back to 0.0). They are now passed as initializers; defaults are
    unchanged, so callers that omitted them see identical behavior.
    """
    ##################################################################################################################
    # production cost coefficients (for the quadratic) a0=constant, a1=linear coefficient, a2=quadratic coefficient. #
    ##################################################################################################################
    #\a_j
    model.ProductionCostA0 = Param(model.Generators, default=0.0, initialize=production_cost_a) # units are $/hr (or whatever the time unit is).
    #\b_j
    model.ProductionCostA1 = Param(model.Generators, default=0.0, initialize=production_cost_b) # units are $/MWhr.
    #\c_j
    model.ProductionCostA2 = Param(model.Generators, default=0.0, initialize=production_cost_c) # units are $/(MWhr^2).
def piece_wise_linear_cost(model, points=None, values=None):
    """Declare ordered piecewise cost-curve breakpoints and values.

    :param points: per-generator ordered breakpoint abscissae (MW).
    :param values: per-generator ordered cost values at those points.
    """
    # production cost associated with each generator, for each time period.
    model.CostPiecewisePoints = Set(model.Generators, initialize=points, ordered=True)
    model.CostPiecewiseValues = Set(model.Generators, initialize=values, ordered=True)
def fuel_cost(model, fuel_cost=1):
    """Declare the per-generator fuel-cost multiplier (defaults to 1)."""
    model.FuelCost = Param(model.Generators, default=1.0, initialize=fuel_cost)
# a function for use in piecewise linearization of the cost function.
def production_cost_function(m, g, t, x):
    # Cost of generator g producing x MW in period t: the tabulated
    # piecewise value scaled by period length and fuel cost.
    return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g]
def production_cost(model):
    """Build the piecewise production-cost tables for every generator
    and time period (see power_generation_piecewise_points_rule)."""
    model.PowerGenerationPiecewisePoints = {}
    model.PowerGenerationPiecewiseValues = {}
    for gen in model.Generators:
        for period in model.TimePeriods:
            power_generation_piecewise_points_rule(model, gen, period)
def power_generation_piecewise_points_rule(m, g, t):
    # Populate m.PowerGenerationPiecewisePoints/Values[g, t] from, in
    # order of preference: explicit piecewise data, a linear cost curve
    # (A2 == 0), or the full quadratic curve sampled on a uniform grid.
    # All values are shifted down by the minimum production cost so the
    # Piecewise constraint can have a lower bound of 0.
    minimum_production_cost = value(m.MinimumProductionCost[g, t])
    if len(m.CostPiecewisePoints[g]) > 0:
        m.PowerGenerationPiecewisePoints[g,t] = list(m.CostPiecewisePoints[g])
        temp = list(m.CostPiecewiseValues[g])
        m.PowerGenerationPiecewiseValues[g,t] = {}
        for i in range(len(m.CostPiecewisePoints[g])):
            m.PowerGenerationPiecewiseValues[g,t][m.PowerGenerationPiecewisePoints[g,t][i]] = temp[i] - minimum_production_cost
        # MinimumPowerOutput will be one of our piecewise points, so it is safe to add (0,0)
        if m.PowerGenerationPiecewisePoints[g,t][0] != 0:
            m.PowerGenerationPiecewisePoints[g,t].insert(0,0)
            m.PowerGenerationPiecewiseValues[g,t][0] = 0
    elif value(m.ProductionCostA2[g]) == 0:
        # If cost is linear, we only need two points -- (0,CostA0-MinCost) and (MaxOutput, MaxCost)
        # NOTE(review): MaximumPowerOutput is indexed [g] here but is
        # declared over (Generators, TimePeriods) elsewhere in this file
        # (and indexed [g, t] below) -- confirm which model this targets.
        m.PowerGenerationPiecewisePoints[g, t] = [0, value(m.MaximumPowerOutput[g])]
        m.PowerGenerationPiecewiseValues[g,t] = {}
        m.PowerGenerationPiecewiseValues[g,t][0] = value(m.ProductionCostA0[g]) - minimum_production_cost
        m.PowerGenerationPiecewiseValues[g,t][m.PowerGenerationPiecewisePoints[g,t][1]] = \
            value(m.ProductionCostA0[g]) + \
            value(m.ProductionCostA1[g]) * m.PowerGenerationPiecewisePoints[g, t][1] \
            - minimum_production_cost
    else:
        min_power = value(m.MinimumPowerOutput[g, t])
        max_power = value(m.MaximumPowerOutput[g, t])
        n = value(m.NumGeneratorCostCurvePieces)
        width = (max_power - min_power) / float(n)
        if width == 0:
            # Degenerate curve: min == max, a single breakpoint.
            m.PowerGenerationPiecewisePoints[g, t] = [min_power]
        else:
            m.PowerGenerationPiecewisePoints[g, t] = [min_power + i*width for i in range(0,n+1)]
            # NOTE: due to numerical precision limitations, the last point in the x-domain
            # of the generation piecewise cost curve may not be precisely equal to the
            # maximum power output level of the generator. this can cause Piecewise to
            # sqawk, as it would like the upper bound of the variable to be represented
            # in the domain. so, we will make it so.
            m.PowerGenerationPiecewisePoints[g, t][-1] = max_power
        m.PowerGenerationPiecewiseValues[g,t] = {}
        # Sample the quadratic cost at each breakpoint, shifted by the
        # minimum production cost.
        for i in range(n+1):
            m.PowerGenerationPiecewiseValues[g,t][m.PowerGenerationPiecewisePoints[g,t][i]] = \
                value(m.ProductionCostA0[g]) + \
                value(m.ProductionCostA1[g]) * m.PowerGenerationPiecewisePoints[g, t][i] + \
                value(m.ProductionCostA2[g]) * m.PowerGenerationPiecewisePoints[g, t][i]**2 \
                - minimum_production_cost
        if m.PowerGenerationPiecewisePoints[g, t][0] != 0:
            m.PowerGenerationPiecewisePoints[g, t].insert(0,0)
            m.PowerGenerationPiecewiseValues[g, t][0] = 0
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from flexbe_states.calculation_state import CalculationState
from sara_flexbe_states.SetKey import SetKey
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.KeepLookingAt import KeepLookingAt
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat May 12 2018
@author: Lucas Maurice
'''
class Action_findSM(Behavior):
'''
Find an entity around sara (will only rotate, won't move), identified by entity class.
'''
def __init__(self):
    """Register the behavior name; the rest is generated boilerplate."""
    super(Action_findSM, self).__init__()
    self.name = 'Action_find'

    # parameters of this behavior

    # references to used behaviors

    # Additional initialization code can be added inside the following tags
    # [MANUAL_INIT]

    # [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:84 y:465, x:727 y:360
_state_machine = OperatableStateMachine(outcomes=['done', 'failed'], input_keys=['className'], output_keys=['entity'])
_state_machine.userdata.className = "bottle"
_state_machine.userdata.entity = None
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:30 y:365
_sm_look_at_0 = OperatableStateMachine(outcomes=['end'], input_keys=['ID'])
with _sm_look_at_0:
# x:75 y:135
OperatableStateMachine.add('look at',
KeepLookingAt(),
transitions={'failed': 'look at'},
autonomy={'failed': Autonomy.Off},
remapping={'ID': 'ID'})
# x:798 y:597
_sm_rotation360_1 = OperatableStateMachine(outcomes=['end'])
with _sm_rotation360_1:
# x:42 y:34
OperatableStateMachine.add('Set 180 degres',
SetKey(Value=3.1416),
transitions={'done': 'rotate center'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'rotation'})
# x:610 y:293
OperatableStateMachine.add('Look Right',
SaraSetHeadAngle(pitch=0.6, yaw=1),
transitions={'done': 'Rotate Right'},
autonomy={'done': Autonomy.Off})
# x:406 y:58
OperatableStateMachine.add('Rotate Left',
WaitState(wait_time=5),
transitions={'done': 'rotate center'},
autonomy={'done': Autonomy.Off})
# x:410 y:313
OperatableStateMachine.add('Rotate Right',
WaitState(wait_time=5),
transitions={'done': 'look center 2'},
autonomy={'done': Autonomy.Off})
# x:209 y:50
OperatableStateMachine.add('Look Left',
SaraSetHeadAngle(pitch=0.6, yaw=-1),
transitions={'done': 'Rotate Left'},
autonomy={'done': Autonomy.Off})
# x:216 y:186
OperatableStateMachine.add('Rotate Left 2',
WaitState(wait_time=5),
transitions={'done': 'Look Left'},
autonomy={'done': Autonomy.Off})
# x:630 y:170
OperatableStateMachine.add('wait 5',
WaitState(wait_time=5),
transitions={'done': 'Look Right'},
autonomy={'done': Autonomy.Off})
# x:603 y:47
OperatableStateMachine.add('rotate center',
SaraSetHeadAngle(pitch=0.6, yaw=0),
transitions={'done': 'wait 5'},
autonomy={'done': Autonomy.Off})
# x:208 y:308
OperatableStateMachine.add('look center 2',
SaraSetHeadAngle(pitch=0.6, yaw=0),
transitions={'done': 'Rotate Left 2'},
autonomy={'done': Autonomy.Off})
# x:683 y:188
_sm_find_entity_2 = OperatableStateMachine(outcomes=['found'], input_keys=['className'], output_keys=['entity'])
with _sm_find_entity_2:
# x:181 y:178
OperatableStateMachine.add('find_entity',
list_entities_by_name(frontality_level=0.5, distance_max=2),
transitions={'found': 'Get Entity', 'none_found': 'find_entity'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:454 y:178
OperatableStateMachine.add('Get Entity',
CalculationState(calculation=lambda x: x[0]),
transitions={'done': 'found'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'entity_list', 'output_value': 'entity'})
# x:371 y:306, x:130 y:365, x:230 y:365
_sm_look_for_2_sec_3 = ConcurrencyContainer(outcomes=['done'], input_keys=['ID'], conditions=[
('done', [('WaitState 2', 'done')]),
('done', [('Look at', 'end')])
])
with _sm_look_for_2_sec_3:
# x:84 y:166
OperatableStateMachine.add('Look at',
_sm_look_at_0,
transitions={'end': 'done'},
autonomy={'end': Autonomy.Inherit},
remapping={'ID': 'ID'})
# x:345 y:187
OperatableStateMachine.add('WaitState 2',
WaitState(wait_time=3),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off})
# x:372 y:27, x:370 y:220, x:368 y:100, x:352 y:305, x:460 y:465
_sm_find_entity_while_turning360_4 = ConcurrencyContainer(outcomes=['found', 'not_found'], input_keys=['className'], output_keys=['entity'], conditions=[
('not_found', [('Rotation360', 'end')]),
('found', [('Find Entity', 'found')]),
('not_found', [('wait', 'done')])
])
with _sm_find_entity_while_turning360_4:
# x:131 y:44
OperatableStateMachine.add('Find Entity',
_sm_find_entity_2,
transitions={'found': 'found'},
autonomy={'found': Autonomy.Inherit},
remapping={'className': 'className', 'entity': 'entity'})
# x:129 y:197
OperatableStateMachine.add('Rotation360',
_sm_rotation360_1,
transitions={'end': 'not_found'},
autonomy={'end': Autonomy.Inherit})
# x:149 y:306
OperatableStateMachine.add('wait',
WaitState(wait_time=30),
transitions={'done': 'not_found'},
autonomy={'done': Autonomy.Off})
with _state_machine:
# x:55 y:41
OperatableStateMachine.add('Look Front Center',
SaraSetHeadAngle(pitch=0.7, yaw=0),
transitions={'done': 'Find Entity WHILE Turning360'},
autonomy={'done': Autonomy.Off})
# x:345 y:156
OperatableStateMachine.add('Look Center Not Found',
SaraSetHeadAngle(pitch=0.7, yaw=0),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Off})
# x:59 y:376
OperatableStateMachine.add('Log Entity',
LogKeyState(text="Found entity: {}", severity=Logger.REPORT_HINT),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'entity'})
# x:26 y:121
OperatableStateMachine.add('Find Entity WHILE Turning360',
_sm_find_entity_while_turning360_4,
transitions={'found': 'get ID', 'not_found': 'Look Center Not Found'},
autonomy={'found': Autonomy.Inherit, 'not_found': Autonomy.Inherit},
remapping={'className': 'className', 'entity': 'entity'})
# x:45 y:290
OperatableStateMachine.add('look for 2 sec',
_sm_look_for_2_sec_3,
transitions={'done': 'Log Entity'},
autonomy={'done': Autonomy.Inherit},
remapping={'ID': 'ID'})
# x:62 y:211
OperatableStateMachine.add('get ID',
CalculationState(calculation=lambda x: x.ID),
transitions={'done': 'look for 2 sec'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'entity', 'output_value': 'ID'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
| |
#!/usr/bin/env python
"""Module to setup an ACME CA server environment able to run multiple tests in parallel"""
import argparse
import errno
import json
import os
from os.path import join
import shutil
import subprocess
import sys
import tempfile
import time
from types import TracebackType
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Type
import requests
# pylint: disable=wildcard-import,unused-wildcard-import
from certbot_integration_tests.utils import misc
from certbot_integration_tests.utils import pebble_artifacts
from certbot_integration_tests.utils import proxy
from certbot_integration_tests.utils.constants import *
class ACMEServer:
    """
    ACMEServer configures and handles the lifecycle of an ACME CA server and an HTTP reverse proxy
    instance, to allow parallel execution of integration tests against the unique http-01 port
    expected by the ACME CA server.

    Typically all pytest integration tests will be executed in this context.

    ACMEServer gives access to the acme_xdist parameter, listing the ports and directory url to use
    for each pytest node. It exposes also start and stop methods in order to start the stack, and
    stop it with proper resources cleanup.

    ACMEServer is also a context manager, and so can be used to ensure ACME server is
    started/stopped upon context enter/exit.
    """
    def __init__(self, acme_server: str, nodes: List[str], http_proxy: bool = True,
                 stdout: bool = False, dns_server: Optional[str] = None,
                 http_01_port: int = DEFAULT_HTTP_01_PORT) -> None:
        """
        Create an ACMEServer instance.
        :param str acme_server: the type of acme server used (boulder-v2 or pebble)
        :param list nodes: list of node names that will be setup by pytest xdist
        :param bool http_proxy: if False do not start the HTTP proxy
        :param bool stdout: if True stream all subprocesses stdout to standard stdout
        :param str dns_server: if set, Pebble/Boulder will use it to resolve domains
        :param int http_01_port: port to use for http-01 validation; currently
            only supported for pebble without an HTTP proxy
        """
        self._construct_acme_xdist(acme_server, nodes)
        self._acme_type = 'pebble' if acme_server == 'pebble' else 'boulder'
        self._proxy = http_proxy
        # Scratch directory holding all server artifacts; removed in stop().
        self._workspace = tempfile.mkdtemp()
        self._processes: List[subprocess.Popen] = []
        self._stdout = sys.stdout if stdout else open(os.devnull, 'w')  # pylint: disable=consider-using-with
        self._dns_server = dns_server
        self._http_01_port = http_01_port
        if http_01_port != DEFAULT_HTTP_01_PORT:
            if self._acme_type != 'pebble' or self._proxy:
                raise ValueError('setting http_01_port is not currently supported '
                                 'with boulder or the HTTP proxy')

    def start(self) -> None:
        """Start the test stack"""
        try:
            if self._proxy:
                self._prepare_http_proxy()
            if self._acme_type == 'pebble':
                self._prepare_pebble_server()
            if self._acme_type == 'boulder':
                self._prepare_boulder_server()
        except BaseException as e:
            # Roll back anything already launched before re-raising.
            self.stop()
            raise e

    def stop(self) -> None:
        """Stop the test stack, and clean its resources"""
        print('=> Tear down the test infrastructure...')
        try:
            for process in self._processes:
                try:
                    process.terminate()
                except OSError as e:
                    # Process may be not started yet, so no PID and terminate fails.
                    # Then the process never started, and the situation is acceptable.
                    if e.errno != errno.ESRCH:
                        raise
            for process in self._processes:
                process.wait()

            if os.path.exists(os.path.join(self._workspace, 'boulder')):
                # Boulder docker generates build artifacts owned by root with 0o744 permissions.
                # If we started the acme server from a normal user that has access to the Docker
                # daemon, this user will not be able to delete these artifacts from the host.
                # We need to do it through a docker.
                process = self._launch_process(['docker', 'run', '--rm', '-v',
                                               '{0}:/workspace'.format(self._workspace),
                                               'alpine', 'rm', '-rf', '/workspace/boulder'])
                process.wait()
        finally:
            # Always remove the workspace and close the devnull sink, even if
            # terminating the child processes failed.
            if os.path.exists(self._workspace):
                shutil.rmtree(self._workspace)
            if self._stdout != sys.stdout:
                self._stdout.close()
        print('=> Test infrastructure stopped and cleaned up.')

    def __enter__(self) -> Dict[str, Any]:
        self.start()
        return self.acme_xdist

    def __exit__(self, exc_type: Optional[Type[BaseException]], exc: Optional[BaseException],
                 traceback: Optional[TracebackType]) -> None:
        self.stop()

    def _construct_acme_xdist(self, acme_server: str, nodes: List[str]) -> None:
        """Generate and return the acme_xdist dict"""
        acme_xdist: Dict[str, Any] = {'acme_server': acme_server,
                                      'challtestsrv_port': CHALLTESTSRV_PORT}

        # Directory and ACME port are set implicitly in the docker-compose.yml
        # files of Boulder/Pebble.
        if acme_server == 'pebble':
            acme_xdist['directory_url'] = PEBBLE_DIRECTORY_URL
        else:  # boulder
            acme_xdist['directory_url'] = BOULDER_V2_DIRECTORY_URL

        # One port of each kind per pytest-xdist node, in disjoint ranges.
        acme_xdist['http_port'] = {
            node: port for (node, port) in  # pylint: disable=unnecessary-comprehension
            zip(nodes, range(5200, 5200 + len(nodes)))
        }
        acme_xdist['https_port'] = {
            node: port for (node, port) in  # pylint: disable=unnecessary-comprehension
            zip(nodes, range(5100, 5100 + len(nodes)))
        }
        acme_xdist['other_port'] = {
            node: port for (node, port) in  # pylint: disable=unnecessary-comprehension
            zip(nodes, range(5300, 5300 + len(nodes)))
        }

        self.acme_xdist = acme_xdist

    def _prepare_pebble_server(self) -> None:
        """Configure and launch the Pebble server"""
        print('=> Starting pebble instance deployment...')
        pebble_artifacts_rv = pebble_artifacts.fetch(self._workspace, self._http_01_port)
        pebble_path, challtestsrv_path, pebble_config_path = pebble_artifacts_rv

        # Configure Pebble at full speed (PEBBLE_VA_NOSLEEP=1) and not randomly refusing valid
        # nonce (PEBBLE_WFE_NONCEREJECT=0) to have a stable test environment.
        environ = os.environ.copy()
        environ['PEBBLE_VA_NOSLEEP'] = '1'
        environ['PEBBLE_WFE_NONCEREJECT'] = '0'
        environ['PEBBLE_AUTHZREUSE'] = '100'
        environ['PEBBLE_ALTERNATE_ROOTS'] = str(PEBBLE_ALTERNATE_ROOTS)

        if self._dns_server:
            dns_server = self._dns_server
        else:
            # challtestsrv's mock DNS server (started just below).
            dns_server = '127.0.0.1:8053'

        self._launch_process(
            [challtestsrv_path, '-management', ':{0}'.format(CHALLTESTSRV_PORT),
             '-defaultIPv6', '""', '-defaultIPv4', '127.0.0.1', '-http01', '""',
             '-tlsalpn01', '""', '-https01', '""'])

        self._launch_process(
            [pebble_path, '-config', pebble_config_path, '-dnsserver', dns_server, '-strict'],
            env=environ)

        # pebble_ocsp_server is imported here and not at the top of module in order to avoid a
        # useless ImportError, in the case where cryptography dependency is too old to support
        # ocsp, but Boulder is used instead of Pebble, so pebble_ocsp_server is not used. This is
        # the typical situation of integration-certbot-oldest tox testenv.
        from certbot_integration_tests.utils import pebble_ocsp_server
        self._launch_process([sys.executable, pebble_ocsp_server.__file__])

        # Wait for the ACME CA server to be up.
        print('=> Waiting for pebble instance to respond...')
        misc.check_until_timeout(self.acme_xdist['directory_url'])  # type: ignore[arg-type]

        print('=> Finished pebble instance deployment.')

    def _prepare_boulder_server(self) -> None:
        """Configure and launch the Boulder server"""
        print('=> Starting boulder instance deployment...')
        instance_path = join(self._workspace, 'boulder')

        # Load Boulder from git, that includes a docker-compose.yml ready for production.
        process = self._launch_process(['git', 'clone', 'https://github.com/letsencrypt/boulder',
                                        '--single-branch', '--depth=1', instance_path])
        process.wait()

        # Allow Boulder to ignore usual limit rate policies, useful for tests.
        os.rename(join(instance_path, 'test/rate-limit-policies-b.yml'),
                  join(instance_path, 'test/rate-limit-policies.yml'))

        if self._dns_server:
            # Change Boulder config to use the provided DNS server
            for suffix in ["", "-remote-a", "-remote-b"]:
                with open(join(instance_path, 'test/config/va{}.json'.format(suffix)), 'r') as f:
                    config = json.loads(f.read())
                config['va']['dnsResolvers'] = [self._dns_server]
                with open(join(instance_path, 'test/config/va{}.json'.format(suffix)), 'w') as f:
                    f.write(json.dumps(config, indent=2, separators=(',', ': ')))

        try:
            # Launch the Boulder server
            self._launch_process(['docker-compose', 'up', '--force-recreate'], cwd=instance_path)

            # Wait for the ACME CA server to be up.
            print('=> Waiting for boulder instance to respond...')
            misc.check_until_timeout(
                self.acme_xdist['directory_url'], attempts=300)  # type: ignore[arg-type]

            if not self._dns_server:
                # Configure challtestsrv to answer any A record request with ip of the docker host.
                response = requests.post('http://localhost:{0}/set-default-ipv4'.format(
                    CHALLTESTSRV_PORT), json={'ip': '10.77.77.1'}
                )
                response.raise_for_status()
        except BaseException:
            # If we failed to set up boulder, print its logs.
            print('=> Boulder setup failed. Boulder logs are:')
            process = self._launch_process([
                'docker-compose', 'logs'], cwd=instance_path, force_stderr=True
            )
            process.wait()
            raise

        print('=> Finished boulder instance deployment.')

    def _prepare_http_proxy(self) -> None:
        """Configure and launch an HTTP proxy"""
        print('=> Configuring the HTTP proxy...')
        http_port_map = cast(Dict[str, int], self.acme_xdist['http_port'])
        # Route '<anything>.<node>.wtf' to the http-01 port of that node.
        mapping = {r'.+\.{0}\.wtf'.format(node): 'http://127.0.0.1:{0}'.format(port)
                   for node, port in http_port_map.items()}
        command = [sys.executable, proxy.__file__, str(DEFAULT_HTTP_01_PORT), json.dumps(mapping)]
        self._launch_process(command)
        print('=> Finished configuring the HTTP proxy.')

    def _launch_process(self, command: List[str], cwd: Optional[str] = None,
                        env: Optional[Mapping[str, str]] = None,
                        force_stderr: bool = False) -> subprocess.Popen:
        """Launch silently a subprocess OS command

        :param command: the command line to execute
        :param cwd: working directory for the child; defaults to the current
            directory, resolved at call time
        :param env: environment for the child; defaults to this process' environment
        :param force_stderr: if True send the child's output to stderr
        """
        if not env:
            env = os.environ
        stdout = sys.stderr if force_stderr else self._stdout
        # FIX: the previous signature used ``cwd: str = os.getcwd()``, which froze
        # the working directory at import time; resolve it at call time instead.
        if cwd is None:
            cwd = os.getcwd()
        # pylint: disable=consider-using-with
        process = subprocess.Popen(
            command, stdout=stdout, stderr=subprocess.STDOUT, cwd=cwd, env=env
        )
        self._processes.append(process)
        return process
def main() -> None:
    """CLI entry point: start a local Pebble or Boulder CA instance and block until CTRL+C."""
    # pylint: disable=missing-function-docstring
    parser = argparse.ArgumentParser(
        description='CLI tool to start a local instance of Pebble or Boulder CA server.')
    parser.add_argument('--server-type', '-s',
                        choices=['pebble', 'boulder-v2'], default='pebble',
                        help='type of CA server to start: can be Pebble or Boulder. '
                             'Pebble is used if not set.')
    parser.add_argument('--dns-server', '-d',
                        help='specify the DNS server as `IP:PORT` to use by '
                             'Pebble; if not specified, a local mock DNS server will be used to '
                             'resolve domains to localhost.')
    parser.add_argument('--http-01-port', type=int, default=DEFAULT_HTTP_01_PORT,
                        help='specify the port to use for http-01 validation; '
                             'this is currently only supported for Pebble.')
    args = parser.parse_args()

    acme_server = ACMEServer(
        args.server_type, [], http_proxy=False, stdout=True,
        dns_server=args.dns_server, http_01_port=args.http_01_port,
    )

    try:
        with acme_server as acme_xdist:
            # BUG FIX: the original format string used {0} twice, so the directory
            # URL was printed in place of the server type.
            print('--> Instance of {0} is running, directory URL is {1}'
                  .format(args.server_type, acme_xdist['directory_url']))
            print('--> Press CTRL+C to stop the ACME server.')
            while True:
                time.sleep(3600)
    except KeyboardInterrupt:
        pass
if __name__ == '__main__':
main()
| |
"""Internationalization and localization support.
This module provides internationalization (I18N) and localization (L10N)
support for your Python programs by providing an interface to the GNU gettext
message catalog library.
I18N refers to the operation by which a program is made aware of multiple
languages. L10N refers to the adaptation of your program, once
internationalized, to the local language and cultural habits.
"""
# This module represents the integration of work, contributions, feedback, and
# suggestions from the following people:
#
# Martin von Loewis, who wrote the initial implementation of the underlying
# C-based libintlmodule (later renamed _gettext), along with a skeletal
# gettext.py implementation.
#
# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
# which also included a pure-Python implementation to read .mo files if
# intlmodule wasn't available.
#
# James Henstridge, who also wrote a gettext.py module, which has some
# interesting, but currently unsupported experimental features: the notion of
# a Catalog class and instances, and the ability to add to a catalog file via
# a Python API.
#
# Barry Warsaw integrated these modules, wrote the .install() API and code,
# and conformed all C and Python code to Python's coding standards.
#
# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
# module.
#
# TODO:
# - Lazy loading of .mo files. Currently the entire catalog is loaded into
# memory, but that's probably bad for large translated programs. Instead,
# the lexical sort of original strings in GNU .mo files should be exploited
# to do binary searches and lazy initializations. Or you might want to use
# the undocumented double-hash algorithm for .mo files with hash tables, but
# you'll need to study the GNU gettext code to do this.
#
# - Support Solaris .mo file formats. Unfortunately, we've been unable to
# find this format documented anywhere.
import os
import sys
import struct
from errno import ENOENT
__all__ = ["bindtextdomain","textdomain","gettext","dgettext",
"find","translation","install","Catalog"]
_default_localedir = os.path.join(sys.prefix, 'share', 'locale')
def _expand_lang(locale):
from locale import normalize
locale = normalize(locale)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = locale.find('@')
if pos >= 0:
modifier = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = locale.find('.')
if pos >= 0:
codeset = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = locale.find('_')
if pos >= 0:
territory = locale[pos:]
locale = locale[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = locale
ret = []
for i in range(mask+1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY: val += territory
if i & COMPONENT_CODESET: val += codeset
if i & COMPONENT_MODIFIER: val += modifier
ret.append(val)
ret.reverse()
return ret
class NullTranslations:
    """Identity translation: every message maps to itself.

    Base class for real catalog classes, which override _parse() to load
    translations from the given file object.
    """

    def __init__(self, fp=None):
        # Catalog metadata and declared charset (empty for the null catalog).
        self._info = {}
        self._charset = None
        if fp:
            self._parse(fp)

    def _parse(self, fp):
        # Nothing to parse for the null catalog; subclasses override this.
        pass

    def gettext(self, message):
        # No catalog available: return the message unchanged.
        return message

    def ugettext(self, message):
        # Unicode variant (relies on the Python 2 ``unicode`` builtin).
        return unicode(message)

    def info(self):
        # Dictionary of catalog metadata.
        return self._info

    def charset(self):
        # Charset declared by the catalog, or None.
        return self._charset

    def install(self, unicode=0):
        # Bind _() into the builtin namespace so all modules can translate.
        import __builtin__
        if unicode:
            __builtin__.__dict__['_'] = self.ugettext
        else:
            __builtin__.__dict__['_'] = self.gettext
class GNUTranslations(NullTranslations):
    # Magic number of .mo files
    LE_MAGIC = 0x950412de
    BE_MAGIC = 0xde120495

    def _parse(self, fp):
        """Override this method to support alternative .mo formats."""
        # We need to & all 32 bit unsigned integers with 0xffffffff for
        # portability to 64 bit machines.
        MASK = 0xffffffff
        unpack = struct.unpack
        filename = getattr(fp, 'name', '')
        # Parse the .mo file header, which consists of 5 little endian 32
        # bit words.
        self._catalog = catalog = {}
        buf = fp.read()
        buflen = len(buf)
        # Are we big endian or little endian?
        magic = unpack('<i', buf[:4])[0] & MASK
        if magic == self.LE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('<4i', buf[4:20])
            ii = '<ii'
        elif magic == self.BE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('>4i', buf[4:20])
            ii = '>ii'
        else:
            raise IOError(0, 'Bad magic number', filename)
        # more unsigned ints
        msgcount &= MASK
        masteridx &= MASK
        transidx &= MASK
        # Now put all messages from the .mo file buffer into the catalog
        # dictionary.  masteridx/transidx walk the original/translated string
        # tables; each table entry is a (length, offset) pair.
        for i in xrange(0, msgcount):  # Python 2 code: xrange, bytes-as-str
            mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
            moff &= MASK
            mend = moff + (mlen & MASK)
            tlen, toff = unpack(ii, buf[transidx:transidx+8])
            toff &= MASK
            tend = toff + (tlen & MASK)
            # NOTE(review): this bounds check uses '<' rather than '<=',
            # so an entry ending exactly at EOF is rejected — confirm intent.
            if mend < buflen and tend < buflen:
                tmsg = buf[toff:tend]
                catalog[buf[moff:mend]] = tmsg
            else:
                raise IOError(0, 'File is corrupt', filename)
            # See if we're looking at GNU .mo conventions for metadata:
            # the entry with an empty msgid (mlen == 0) carries header
            # fields such as Project-Id-Version and Content-Type.
            if mlen == 0 and tmsg.lower().startswith('project-id-version:'):
                # Catalog description
                for item in tmsg.split('\n'):
                    item = item.strip()
                    if not item:
                        continue
                    k, v = item.split(':', 1)
                    k = k.strip().lower()
                    v = v.strip()
                    self._info[k] = v
                    if k == 'content-type':
                        # e.g. "text/plain; charset=UTF-8" -> "UTF-8"
                        self._charset = v.split('charset=')[1]
            # advance to next entry in the seek tables
            masteridx += 8
            transidx += 8

    def gettext(self, message):
        # Fall back to the untranslated message when not in the catalog.
        return self._catalog.get(message, message)

    def ugettext(self, message):
        # Decode the translation using the catalog's declared charset
        # (Python 2 ``unicode`` builtin).
        tmsg = self._catalog.get(message, message)
        return unicode(tmsg, self._charset)
# Locate a .mo file using the gettext strategy
def find(domain, localedir=None, languages=None):
    """Locate a .mo file for *domain* following the GNU gettext strategy.

    Returns the path of the first catalog found for any candidate
    language, or None when nothing matches.
    """
    # Get some reasonable defaults for arguments that were not supplied
    if localedir is None:
        localedir = _default_localedir
    if languages is None:
        languages = []
        # The first of these environment variables that is set wins.
        for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
            val = os.environ.get(envar)
            if val:
                languages = val.split(':')
                break
    # 'C' always terminates the search (see below).
    if 'C' not in languages:
        languages.append('C')
    # Normalize and expand every requested language, keeping first-seen
    # order and dropping duplicates.
    expanded = []
    for lang in languages:
        for candidate in _expand_lang(lang):
            if candidate not in expanded:
                expanded.append(candidate)
    # Return the first catalog that exists on disk; 'C' means "no
    # translation", so stop looking when we reach it.
    for lang in expanded:
        if lang == 'C':
            break
        mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
        if os.path.exists(mofile):
            return mofile
    return None
# a mapping between absolute .mo file path and Translation object
_translations = {}
def translation(domain, localedir=None, languages=None, class_=None):
    """Return a translation object for *domain*, caching one per .mo file.

    Raises IOError(ENOENT) when no message catalog can be located.
    """
    if class_ is None:
        class_ = GNUTranslations
    mofile = find(domain, localedir, languages)
    if mofile is None:
        raise IOError(ENOENT, 'No translation file found for domain', domain)
    key = os.path.abspath(mofile)
    # Avoid opening, reading, and parsing the .mo file after it's been done
    # once; later calls reuse the cached instance.
    # TBD: do we need to worry about the file pointer getting collected?
    cached = _translations.get(key)
    if cached is None:
        cached = _translations.setdefault(key, class_(open(mofile, 'rb')))
    return cached
def install(domain, localedir=None, unicode=0):
    # Install _() for *domain* into the builtin namespace (see
    # NullTranslations.install); raises IOError when no catalog is found.
    translation(domain, localedir).install(unicode)
# a mapping b/w domains and locale directories
_localedirs = {}
# current global domain, `messages' used for compatibility w/ GNU gettext
_current_domain = 'messages'
def textdomain(domain=None):
    # Set the global message domain when *domain* is given; always return
    # the (possibly updated) current domain.
    global _current_domain
    if domain is not None:
        _current_domain = domain
    return _current_domain
def bindtextdomain(domain, localedir=None):
    # Bind *domain* to *localedir* when given; always return the directory
    # currently bound to the domain (default: the system locale directory).
    global _localedirs
    if localedir is not None:
        _localedirs[domain] = localedir
    return _localedirs.get(domain, _default_localedir)
def dgettext(domain, message):
    """Translate *message* in *domain*; fall back to the original text."""
    localedir = _localedirs.get(domain, None)
    try:
        trans = translation(domain, localedir)
    except IOError:
        # No catalog for this domain: leave the message untranslated.
        return message
    return trans.gettext(message)
def gettext(message):
    # Translate using the current global domain (see textdomain()).
    return dgettext(_current_domain, message)
# dcgettext() has been deemed unnecessary and is not implemented.
# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
# was:
#
# import gettext
# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
# _ = cat.gettext
# print _('Hello World')
# The resulting catalog object currently don't support access through a
# dictionary API, which was supported (but apparently unused) in GNOME
# gettext.
Catalog = translation
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import Axon
from Axon.Component import component
from Axon.AdaptiveCommsComponent import AdaptiveCommsComponent
from Axon.Ipc import producerFinished, shutdownMicroprocess, ipc, newComponent
from Axon import Ipc
import time
import sys
sys.path.append("../MPS")
from IRCClient import SimpleIRCClient
class demodulation(component):
    """Filters out title messages and blank lines, and tags content"""

    def main(self):
        while 1:
            while self.dataReady("inbox"):
                # rstrip() keeps leading spaces - indentation is meaningful below.
                text = self.recv("inbox").rstrip()
                if text:
                    # this line is not blank
                    if text == "Twelfth Night":
                        # running title line - discard it
                        pass
                    elif text[:4] == "    ":
                        # indented line (four leading spaces): a stage
                        # direction or a line of dialog
                        if text[4:9] == "Enter":
                            # actors walking on
                            self.send(("ENTER",text[9:].strip()), "outbox")
                        elif text[4:12] == "Re-enter":
                            # actors walking on
                            self.send(("ENTER",text[12:].strip()), "outbox")
                        elif text[4:10] == "Exeunt":
                            # actors leaving; a bare "Exeunt" means everybody
                            if not text[10:]: text+="all"
                            self.send(("EXIT",text[10:].strip()), "outbox")
                        elif text[4:8] == "Exit":
                            # actors leaving
                            self.send(("EXIT",text[8:].strip()), "outbox")
                        else:
                            # speech
                            self.send(("SAY",text.strip()), "outbox")
                    elif text[:3] == "ACT":
                        # act change
                        self.send(("ACT",text[4:].strip()), "outbox")
                    elif text[:5] == "SCENE":
                        # scene change
                        self.send(("SCENE",text[6:].strip()), "outbox")
                    else:
                        # actor name (unindented, no keyword prefix)
                        self.send(("ACTOR",text.title().strip()), "outbox")
            yield 1
class error_correction(component):
    """Notes speaker names and tags speech"""

    def main(self):
        # Whoever is currently speaking; updated by ACTOR messages.
        self.currentspeaker = ""
        while 1:
            yield 1
            if not self.dataReady("inbox"):
                continue
            (cmd, arg) = self.recv("inbox")
            if cmd == "ACTOR":
                # A speaker's name went by - change the current speaker.
                self.currentspeaker = arg
            elif cmd == "SAY":
                # Tag the speech with the current speaker.
                self.send(("SAY", self.currentspeaker, arg), "outbox")
            else:
                # Something else is happening - just pass it on.
                self.send((cmd, arg), "outbox")
class demultiplexing(component):
    """parses ENTER and EXIT/EXEUNT stage directions"""

    def main(self):
        # Actors currently on stage (name -> 1); used to expand "all".
        self.currentActors = {}
        while 1:
            yield 1
            if self.dataReady("inbox"):
                msg = self.recv("inbox")
                cmd = msg[0]
                if cmd == "ENTER" or cmd == "EXIT":
                    # Replace the raw direction text with an explicit actor list.
                    actors = self.extractActors(msg[1])
                    msg = [cmd] + actors
                    self.send( msg, "outbox")
                    # Keep the on-stage roster up to date for "all" expansion.
                    if cmd == "ENTER":
                        for actor in actors:
                            self.currentActors[actor] = 1
                    else: # cmd == "EXIT":
                        for actor in actors:
                            # NOTE(review): bare except silently ignores an
                            # actor exiting who was never recorded entering.
                            try: del self.currentActors[actor]
                            except: pass
                else:
                    # Not a stage direction - pass through untouched.
                    self.send( msg, "outbox")

    def extractActors(self, text):
        # pick out names that begin with capitals, separated by lower case or punctuation
        # stop at end of string or at except
        # detect "all" case-insensitive
        # substitute punctuation with ' and '
        for punct in [",", ":", ";", "."]:
            text = text.replace(punct," and ")
        # split into words
        words = text.split(" ")
        # substitute 'all' with all actors currently on stage
        namewords = []
        while words:
            if words[0].lower() == "all":
                for actor in self.currentActors.keys():
                    namewords.extend([" and ",actor])
                namewords.append(" and ")
            else:
                namewords.append(words[0])
            del words[0]
        # now pass through: capitalised words accumulate into the current
        # name; "and"/lower-case words flush it; "except" flips to removal
        # mode so later names are struck from the list instead of added.
        doingExcepts = False
        names = []
        currentname = ""
        for word in namewords:
            flush = False
            if word:
                if word == "except":
                    doingExcepts = True
                    flush = True
                elif word == "and":
                    flush = True
                elif word.islower():
                    flush = True
                elif word[0].isupper():
                    currentname += word + " "
            if flush and currentname:
                cleanname = currentname.title().strip()
                if not doingExcepts:
                    names.append(cleanname)
                elif cleanname in names:
                    names.remove(cleanname)
                currentname=""
        # flush any trailing, unterminated name
        if currentname:
            cleanname = currentname.title().strip()
            if not doingExcepts:
                names.append(cleanname)
            elif cleanname in names:
                names.remove(cleanname)
        return names
class director(AdaptiveCommsComponent):
    """Directs the action in the play.

    Consumes tagged stage events (SAY/ENTER/EXIT/ACT/SCENE), creating one
    IRC client per actor and relaying dialog and stage directions to them.
    """

    class _Done(Exception):
        """Internal control-flow signal: the performance is over.

        BUG FIX: the original code used string exceptions
        (``raise "DONE"`` / ``except "DONE":``), which raise TypeError on
        Python 2.6+; a real exception class behaves identically everywhere.
        """
        pass

    def __init__(self, host, channel):
        super(director,self).__init__()
        # IRC server and channel that every actor connects to.
        self.host = host
        self.channel = channel

    def main(self):
        # actor name -> (component, outboxes dict, linkages dict)
        self.actors = {}
        try:
            # start the performance
            yield self.enter("NARRATOR")
            waitUntil = self.say("NARRATOR", "Welcome to our performance of Shakespeare's 'Twelfth Night'")
            while self.scheduler.time < waitUntil:
                yield 1
                if self.shutdown(): raise self._Done()
            waitUntil = self.say("NARRATOR", "Brought to you by http://kamaelia.sf.net/")
            while self.scheduler.time < waitUntil:
                yield 1
                if self.shutdown(): raise self._Done()

            while 1:
                yield 1
                if self.shutdown(): raise self._Done()
                if self.dataReady("inbox"):
                    msg = self.recv("inbox")
                    cmd = msg[0].upper().strip()
                    args = msg[1:]
                    if cmd == "SAY":
                        # speak dialog, pausing long enough for it to be 'read'
                        waitUntil = self.say( actor=args[0], words=args[1] )
                        while self.scheduler.time < waitUntil:
                            yield 1
                            if self.shutdown(): raise self._Done()
                    elif cmd == "ENTER":
                        # make actors, get them to walk on
                        for actor in args:
                            yield self.enter(actor)
                    elif cmd == "EXIT":
                        # get actors to leave
                        if args:
                            for actor in args:
                                for retval in self.exeunt(actor):
                                    yield retval
                        else:
                            # BUG FIX: exeuntAll() is a generator; the original
                            # called it without iterating, so nobody ever left.
                            for retval in self.exeuntAll():
                                yield retval
                    elif cmd == "ACT":
                        # act change - clear the stage first
                        for retval in self.exeuntAll():
                            yield retval
                        self.say( actor="NARRATOR", words="Act "+args[0])
                    elif cmd == "SCENE":
                        # scene change - clear the stage first
                        for retval in self.exeuntAll():
                            yield retval
                        self.setChange("Scene "+args[0])
                        waitUntil = time.time() + 5
                        while self.scheduler.time < waitUntil:
                            yield 1
                            if self.shutdown(): raise self._Done()

            # NOTE(review): the dispatch loop above only exits by raising
            # _Done, so this closing speech is unreachable as written;
            # preserved from the original for fidelity.
            self.say("NARRATOR", "That concludes our performance. We hope you enjoyed it. Goodbye!")
            waitUntil = time.time() + 5
            while self.scheduler.time < waitUntil:
                yield 1
                if self.shutdown(): raise self._Done()
        except self._Done:
            # Performance over (shutdown requested): clear the whole stage.
            for retval in self.exeuntAll(includingNarrator=True):
                yield retval

    def closeDownComponent(self):
        pass

    def shutdown(self):
        """\
        Returns True if a shutdownMicroprocess or producerFinished message is received.
        Also passes the message on out of the "signal" outbox.
        """
        if self.dataReady("control"):
            msg = self.recv("control")
            if isinstance(msg, shutdownMicroprocess) or isinstance(msg, producerFinished):
                self.send(msg, "signal")
                return True
        return False

    def makeActor(self,name):
        """Create an IRC client component to play *name*."""
        name = name.replace(" ","") # spaces not possible in nicks
        return SimpleIRCClient(host=self.host, nick=name, defaultChannel=self.channel)

    def say(self, actor, words):
        """Send *words* to *actor*'s IRC outbox; return the scheduler time
        by which the line should have been 'read' (0.07s per character)."""
        self.send(actor+": "+words+"\n", "outbox")
        outbox = self.actors[actor][1]["outbox"]
        words = words.strip()
        # [bracketed] text becomes a CTCP ACTION ("/me") message.
        # (Slices instead of [0]/[-1] so empty speech cannot raise IndexError.)
        if words[:1]=="[" and words[-1:]=="]":
            words = "\1ACTION "+words[1:-1]+"\1"
        self.send( words, outbox)
        return self.scheduler.time + 0.07*len(words)

    def setChange(self, newLocation):
        """Announce a scene change via the narrator's topic outbox."""
        self.send( "NEW SCENE: "+newLocation+"\n", "outbox")
        outbox = self.actors["NARRATOR"][1]["topic"]
        self.send( newLocation, outbox)
        # NOTE(review): self.waitUntil is set but never read elsewhere.
        self.waitUntil = self.scheduler.time + 4.0

    def enter(self, actorName):
        """Bring *actorName* on stage, creating and wiring a client if new."""
        self.send("ENTER: "+actorName+"\n", "outbox")
        if actorName not in self.actors:
            actor = self.makeActor(actorName)
            outboxes = dict()
            linkages = dict()
            for (o,i) in [("outbox","inbox"),("signal","control"),("topic","topic")]:
                outboxes[o] = self.addOutbox(o)
                linkages[o] = self.link( (self, outboxes[o]), (actor,i) )
            self.actors[actorName] = (actor, outboxes, linkages)
            self.addChildren(actor)
            return newComponent(actor)
        else:
            return 1

    def exeunt(self, actorName):
        """Send *actorName* off stage, shutting down and unlinking its client."""
        self.send("EXIT: "+actorName+"\n", "outbox")
        if actorName in self.actors:
            (actor, outboxes, linkages) = self.actors[actorName]
            self.send(shutdownMicroprocess(self), outboxes['signal'])
            yield 1
            for box in outboxes.keys():
                self.postoffice.deregisterlinkage(thelinkage = linkages[box])
                self.deleteOutbox(outboxes[box])
            del self.actors[actorName]

    def exeuntAll(self, includingNarrator=False):
        """Send everybody off stage (the narrator only when asked)."""
        self.send("EXIT ALL...\n","outbox")
        for actor in self.actors.keys():
            if includingNarrator or actor != "NARRATOR":
                for retval in self.exeunt(actor):
                    yield retval
# Demo entry point: read the Twelfth Night script file at a controlled
# rate, pass it through the demodulation / error-correction /
# demultiplexing stages (defined elsewhere in this file), and perform it
# on IRC channel #theglobe via a local server, echoing the director's
# log output to the console.
if __name__ == "__main__":
    from Kamaelia.Util.PipelineComponent import pipeline
    from Kamaelia.File.Reading import RateControlledFileReader
    from Kamaelia.Util.ConsoleEcho import consoleEchoer
    pipeline( RateControlledFileReader("../CE/twelfthnight.txt", readmode="lines", rate=50, chunksize=1),
              demodulation(),
              error_correction(),
              demultiplexing(),
              director("127.0.0.1", "#theglobe"),
              consoleEchoer(),
            ).run()
# NOTE(review): a stray "|" character appeared here (file-concatenation
# artifact); replaced with this comment so the module stays parseable.
# Fixture: expected file path -> classification. Every listed path is
# owned by a dpkg-managed package, so all values are the same marker.
pkgfiles_all = dict.fromkeys(
    [
        "/usr/share/man/man8/apt-config.8.gz",
        "/usr/share/doc/tar/NEWS.Debian.gz",
        "/usr/share/base-files/staff-group-for-usr-local",
        "/usr/share/doc/apt/examples/configure-index.gz",
        "/usr/share/man/man1/dirname.1.gz",
        "/usr/share/man/man8/grpconv.8.gz",
        "/lib/x86_64-linux-gnu/security/pam_tty_audit.so",
    ],
    "DPKGFILE",
)
# Fixture: package name -> installed version string (Debian 9 base image).
pkgs_all = dict(
    [
        ("libsystemd0", "232-25+deb9u12"),
        ("tar", "1.29b-1.1"),
        ("bsdutils", "1:2.29.2-1+deb9u1"),
        ("multiarch-support", "2.24-11+deb9u4"),
        ("findutils", "4.6.0+git+20161106-2"),
        ("liblz4-1", "0.0~r131-2+b1"),
        ("libc6", "2.24-11+deb9u4"),
        ("libapt-pkg5.0", "1.4.10"),
        ("libcap-ng0", "0.7.7-3+b1"),
    ]
)
pkgs_allinfo = {
"adduser": {
"arch": "all",
"cpes": ["cpe:2.3:a:adduser:adduser:3.115:*:*:*:*:*:*:*"],
"license": "GPL-2",
"origin": "Debian Adduser Developers <adduser-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "849000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "3.115",
},
"apt": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:apt:apt:1.4.10:*:*:*:*:*:*:*"],
"license": "GPL-2 GPLv2+",
"origin": "APT Development Team <deity@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "3539000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1.4.10",
},
"base-files": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:base-files:base-files:9.9+deb9u13:*:*:*:*:*:*:*",
"cpe:2.3:a:base_files:base-files:9.9+deb9u13:*:*:*:*:*:*:*",
"cpe:2.3:a:base-files:base_files:9.9+deb9u13:*:*:*:*:*:*:*",
"cpe:2.3:a:base_files:base_files:9.9+deb9u13:*:*:*:*:*:*:*",
"cpe:2.3:a:base:base-files:9.9+deb9u13:*:*:*:*:*:*:*",
"cpe:2.3:a:base:base_files:9.9+deb9u13:*:*:*:*:*:*:*",
],
"license": "GPL",
"origin": "Santiago Vila <sanvila@debian.org> (maintainer)",
"release": "N/A",
"size": "333000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "9.9+deb9u13",
},
"base-passwd": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:base-passwd:base-passwd:3.5.43:*:*:*:*:*:*:*",
"cpe:2.3:a:base_passwd:base-passwd:3.5.43:*:*:*:*:*:*:*",
"cpe:2.3:a:base-passwd:base_passwd:3.5.43:*:*:*:*:*:*:*",
"cpe:2.3:a:base_passwd:base_passwd:3.5.43:*:*:*:*:*:*:*",
"cpe:2.3:a:base:base-passwd:3.5.43:*:*:*:*:*:*:*",
"cpe:2.3:a:base:base_passwd:3.5.43:*:*:*:*:*:*:*",
],
"license": "GPL-2 PD",
"origin": "Colin Watson <cjwatson@debian.org> (maintainer)",
"release": "N/A",
"size": "229000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "3.5.43",
},
"bash": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:bash:bash:4.4-5:*:*:*:*:*:*:*"],
"license": "GPL-3",
"origin": "Matthias Klose <doko@debian.org> (maintainer)",
"release": "N/A",
"size": "5798000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "4.4-5",
},
"bsdutils": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:bsdutils:bsdutils:1:2.29.2-1+deb9u1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "238000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "1:2.29.2-1+deb9u1",
},
"coreutils": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:coreutils:coreutils:8.26-3:*:*:*:*:*:*:*"],
"license": "GPL-3",
"origin": "Michael Stone <mstone@debian.org> (maintainer)",
"release": "N/A",
"size": "15103000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "8.26-3",
},
"cron-apt": {
"arch": "all",
"cpes": [
"cpe:2.3:a:cron_apt:cron-apt:0.10.0:*:*:*:*:*:*:*",
"cpe:2.3:a:cron-apt:cron-apt:0.10.0:*:*:*:*:*:*:*",
"cpe:2.3:a:cron_apt:cron_apt:0.10.0:*:*:*:*:*:*:*",
"cpe:2.3:a:cron-apt:cron_apt:0.10.0:*:*:*:*:*:*:*",
"cpe:2.3:a:cron:cron-apt:0.10.0:*:*:*:*:*:*:*",
"cpe:2.3:a:cron:cron_apt:0.10.0:*:*:*:*:*:*:*",
],
"license": "GPL",
"origin": "Ola Lundqvist <opal@debian.org> (maintainer)",
"release": "N/A",
"size": "167000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "0.10.0",
},
"dash": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:dash:dash:0.5.8-2.4:*:*:*:*:*:*:*"],
"license": "GPL",
"origin": "Gerrit Pape <pape@smarden.org> (maintainer)",
"release": "N/A",
"size": "204000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "0.5.8-2.4",
},
"debconf": {
"arch": "all",
"cpes": ["cpe:2.3:a:debconf:debconf:1.5.61:*:*:*:*:*:*:*"],
"license": "BSD-2-clause",
"origin": "Debconf Developers <debconf-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "558000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1.5.61",
},
"debian-archive-keyring": {
"arch": "all",
"cpes": [
"cpe:2.3:a:debian-archive-keyring:debian_archive_keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian-archive-keyring:debian-archive-keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian_archive_keyring:debian_archive_keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian_archive_keyring:debian-archive-keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian-archive:debian_archive_keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian-archive:debian-archive-keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian_archive:debian_archive_keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian_archive:debian-archive-keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian:debian_archive_keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:debian:debian-archive-keyring:2017.5+deb9u1:*:*:*:*:*:*:*",
],
"license": "GPL",
"origin": "Debian Release Team <packages@release.debian.org> (maintainer)",
"release": "N/A",
"size": "148000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "2017.5+deb9u1",
},
"debianutils": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:debianutils:debianutils:4.8.1.1:*:*:*:*:*:*:*"],
"license": "GPL",
"origin": "Clint Adams <clint@debian.org> (maintainer)",
"release": "N/A",
"size": "213000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "4.8.1.1",
},
"diffutils": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:diffutils:diffutils:1:3.5-3:*:*:*:*:*:*:*"],
"license": "GFDL GPL",
"origin": "Santiago Vila <sanvila@debian.org> (maintainer)",
"release": "N/A",
"size": "1327000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1:3.5-3",
},
"dpkg": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:dpkg:dpkg:1.18.25:*:*:*:*:*:*:*"],
"license": "BSD-2-clause GPL-2 GPL-2+ public-domain-md5 public-domain-s-s-d",
"origin": "Dpkg Developers <debian-dpkg@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "6778000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1.18.25",
},
"e2fslibs": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:e2fslibs:e2fslibs:1.43.4-2+deb9u2:*:*:*:*:*:*:*"],
"license": "GPL-2 LGPL-2",
"origin": "Theodore Y. Ts'o <tytso@mit.edu> (maintainer)",
"release": "N/A",
"size": "450000",
"sourcepkg": "e2fsprogs",
"type": "dpkg",
"version": "1.43.4-2+deb9u2",
},
"e2fsprogs": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:e2fsprogs:e2fsprogs:1.43.4-2+deb9u2:*:*:*:*:*:*:*"],
"license": "GPL-2 LGPL-2",
"origin": "Theodore Y. Ts'o <tytso@mit.edu> (maintainer)",
"release": "N/A",
"size": "4027000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1.43.4-2+deb9u2",
},
"findutils": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:findutils:findutils:4.6.0+git+20161106-2:*:*:*:*:*:*:*"],
"license": "GFDL-1.3 GPL-3",
"origin": "Andreas Metzler <ametzler@debian.org> (maintainer)",
"release": "N/A",
"size": "1854000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "4.6.0+git+20161106-2",
},
"gcc-6-base": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:gcc-6-base:gcc_6_base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc-6-base:gcc-6-base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc_6_base:gcc_6_base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc_6_base:gcc-6-base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc-6:gcc_6_base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc-6:gcc-6-base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc_6:gcc_6_base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc_6:gcc-6-base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc:gcc_6_base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:gcc:gcc-6-base:6.3.0-18+deb9u1:*:*:*:*:*:*:*",
],
"license": "Artistic GFDL-1.2 GPL GPL-2 GPL-3",
"origin": "Debian GCC Maintainers <debian-gcc@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "209000",
"sourcepkg": "gcc-6",
"type": "dpkg",
"version": "6.3.0-18+deb9u1",
},
"gpgv": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:gpgv:gpgv:2.1.18-8~deb9u4:*:*:*:*:*:*:*"],
"license": "BSD-3-clause Expat GPL-3 GPL-3+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ RFC-Reference TinySCHEME permissive",
"origin": "Debian GnuPG Maintainers <pkg-gnupg-maint@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "721000",
"sourcepkg": "gnupg2",
"type": "dpkg",
"version": "2.1.18-8~deb9u4",
},
"grep": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:grep:grep:2.27-2:*:*:*:*:*:*:*"],
"license": "GPL-3 GPL-3+",
"origin": "Anibal Monsalve Salazar <anibal@debian.org> (maintainer)",
"release": "N/A",
"size": "1131000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "2.27-2",
},
"gzip": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:gzip:gzip:1.6-5+b1:*:*:*:*:*:*:*"],
"license": "GPL",
"origin": "Bdale Garbee <bdale@gag.com> (maintainer)",
"release": "N/A",
"size": "231000",
"sourcepkg": "gzip",
"type": "dpkg",
"version": "1.6-5+b1",
},
"hostname": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:hostname:hostname:3.18+b1:*:*:*:*:*:*:*"],
"license": "GPL-2",
"origin": "Debian Hostname Team <hostname-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "47000",
"sourcepkg": "hostname",
"type": "dpkg",
"version": "3.18+b1",
},
"init-system-helpers": {
"arch": "all",
"cpes": [
"cpe:2.3:a:init-system-helpers:init-system-helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init-system-helpers:init_system_helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init_system_helpers:init-system-helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init_system_helpers:init_system_helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init-system:init-system-helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init-system:init_system_helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init_system:init-system-helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init_system:init_system_helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init:init-system-helpers:1.48:*:*:*:*:*:*:*",
"cpe:2.3:a:init:init_system_helpers:1.48:*:*:*:*:*:*:*",
],
"license": "BSD-3-clause GPL-2 GPL-2+",
"origin": "Debian systemd Maintainers <pkg-systemd-maintainers@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "131000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1.48",
},
"libacl1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libacl1:libacl1:2.2.52-3+b1:*:*:*:*:*:*:*"],
"license": "GPL LGPL-2.1",
"origin": "Anibal Monsalve Salazar <anibal@debian.org> (maintainer)",
"release": "N/A",
"size": "62000",
"sourcepkg": "acl",
"type": "dpkg",
"version": "2.2.52-3+b1",
},
"libapt-pkg5.0": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libapt-pkg5.0:libapt-pkg5.0:1.4.10:*:*:*:*:*:*:*",
"cpe:2.3:a:libapt_pkg5.0:libapt-pkg5.0:1.4.10:*:*:*:*:*:*:*",
"cpe:2.3:a:libapt-pkg5.0:libapt_pkg5.0:1.4.10:*:*:*:*:*:*:*",
"cpe:2.3:a:libapt_pkg5.0:libapt_pkg5.0:1.4.10:*:*:*:*:*:*:*",
"cpe:2.3:a:libapt:libapt-pkg5.0:1.4.10:*:*:*:*:*:*:*",
"cpe:2.3:a:libapt:libapt_pkg5.0:1.4.10:*:*:*:*:*:*:*",
],
"license": "GPL-2 GPLv2+",
"origin": "APT Development Team <deity@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "3056000",
"sourcepkg": "apt",
"type": "dpkg",
"version": "1.4.10",
},
"libattr1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libattr1:libattr1:1:2.4.47-2+b2:*:*:*:*:*:*:*"],
"license": "GPL-2 LGPL-2.1",
"origin": "Anibal Monsalve Salazar <anibal@debian.org> (maintainer)",
"release": "N/A",
"size": "42000",
"sourcepkg": "attr",
"type": "dpkg",
"version": "1:2.4.47-2+b2",
},
"libaudit-common": {
"arch": "all",
"cpes": [
"cpe:2.3:a:libaudit-common:libaudit-common:1:2.6.7-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libaudit_common:libaudit-common:1:2.6.7-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libaudit-common:libaudit_common:1:2.6.7-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libaudit_common:libaudit_common:1:2.6.7-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libaudit:libaudit-common:1:2.6.7-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libaudit:libaudit_common:1:2.6.7-2:*:*:*:*:*:*:*",
],
"license": "GPL-1 GPL-2 LGPL-2.1",
"origin": "Laurent Bigonville <bigon@debian.org> (maintainer)",
"release": "N/A",
"size": "30000",
"sourcepkg": "audit",
"type": "dpkg",
"version": "1:2.6.7-2",
},
"libaudit1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libaudit1:libaudit1:1:2.6.7-2:*:*:*:*:*:*:*"],
"license": "GPL-1 GPL-2 LGPL-2.1",
"origin": "Laurent Bigonville <bigon@debian.org> (maintainer)",
"release": "N/A",
"size": "150000",
"sourcepkg": "audit",
"type": "dpkg",
"version": "1:2.6.7-2",
},
"libblkid1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libblkid1:libblkid1:2.29.2-1+deb9u1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "367000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "2.29.2-1+deb9u1",
},
"libbz2-1.0": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libbz2-1.0:libbz2-1.0:1.0.6-8.1:*:*:*:*:*:*:*",
"cpe:2.3:a:libbz2_1.0:libbz2-1.0:1.0.6-8.1:*:*:*:*:*:*:*",
"cpe:2.3:a:libbz2-1.0:libbz2_1.0:1.0.6-8.1:*:*:*:*:*:*:*",
"cpe:2.3:a:libbz2_1.0:libbz2_1.0:1.0.6-8.1:*:*:*:*:*:*:*",
"cpe:2.3:a:libbz2:libbz2-1.0:1.0.6-8.1:*:*:*:*:*:*:*",
"cpe:2.3:a:libbz2:libbz2_1.0:1.0.6-8.1:*:*:*:*:*:*:*",
],
"license": "GPL-2",
"origin": "Anibal Monsalve Salazar <anibal@debian.org> (maintainer)",
"release": "N/A",
"size": "96000",
"sourcepkg": "bzip2",
"type": "dpkg",
"version": "1.0.6-8.1",
},
"libc-bin": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libc-bin:libc-bin:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:libc_bin:libc-bin:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:libc-bin:libc_bin:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:libc_bin:libc_bin:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:libc:libc-bin:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:libc:libc_bin:2.24-11+deb9u4:*:*:*:*:*:*:*",
],
"license": "GPL-2 LGPL-2.1",
"origin": "GNU Libc Maintainers <debian-glibc@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "3366000",
"sourcepkg": "glibc",
"type": "dpkg",
"version": "2.24-11+deb9u4",
},
"libc6": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libc6:libc6:2.24-11+deb9u4:*:*:*:*:*:*:*"],
"license": "GPL-2 LGPL-2.1",
"origin": "GNU Libc Maintainers <debian-glibc@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "10686000",
"sourcepkg": "glibc",
"type": "dpkg",
"version": "2.24-11+deb9u4",
},
"libcap-ng0": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libcap-ng0:libcap-ng0:0.7.7-3+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:libcap_ng0:libcap-ng0:0.7.7-3+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:libcap-ng0:libcap_ng0:0.7.7-3+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:libcap_ng0:libcap_ng0:0.7.7-3+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:libcap:libcap-ng0:0.7.7-3+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:libcap:libcap_ng0:0.7.7-3+b1:*:*:*:*:*:*:*",
],
"license": "GPL-2 GPL-3 LGPL-2.1",
"origin": "Pierre Chifflier <pollux@debian.org> (maintainer)",
"release": "N/A",
"size": "43000",
"sourcepkg": "libcap-ng",
"type": "dpkg",
"version": "0.7.7-3+b1",
},
"libcomerr2": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libcomerr2:libcomerr2:1.43.4-2+deb9u2:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "Theodore Y. Ts'o <tytso@mit.edu> (maintainer)",
"release": "N/A",
"size": "84000",
"sourcepkg": "e2fsprogs",
"type": "dpkg",
"version": "1.43.4-2+deb9u2",
},
"libdb5.3": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libdb5.3:libdb5.3:5.3.28-12+deb9u1:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "Debian Berkeley DB Group <pkg-db-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "1814000",
"sourcepkg": "db5.3",
"type": "dpkg",
"version": "5.3.28-12+deb9u1",
},
"libdebconfclient0": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libdebconfclient0:libdebconfclient0:0.227:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "Debian Install System Team <debian-boot@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "67000",
"sourcepkg": "cdebconf",
"type": "dpkg",
"version": "0.227",
},
"libfdisk1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libfdisk1:libfdisk1:2.29.2-1+deb9u1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "469000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "2.29.2-1+deb9u1",
},
"libgcc1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libgcc1:libgcc1:1:6.3.0-18+deb9u1:*:*:*:*:*:*:*"],
"license": "Artistic GFDL-1.2 GPL GPL-2 GPL-3",
"origin": "Debian GCC Maintainers <debian-gcc@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "108000",
"sourcepkg": "gcc-6",
"type": "dpkg",
"version": "1:6.3.0-18+deb9u1",
},
"libgcrypt20": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libgcrypt20:libgcrypt20:1.7.6-2+deb9u3:*:*:*:*:*:*:*"],
"license": "GPL-2 LGPL",
"origin": "Debian GnuTLS Maintainers <pkg-gnutls-maint@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "1266000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1.7.6-2+deb9u3",
},
"libgpg-error0": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libgpg-error0:libgpg-error0:1.26-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libgpg_error0:libgpg-error0:1.26-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libgpg-error0:libgpg_error0:1.26-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libgpg_error0:libgpg_error0:1.26-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libgpg:libgpg-error0:1.26-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libgpg:libgpg_error0:1.26-2:*:*:*:*:*:*:*",
],
"license": "GPL-2.1+ LGPL-2.1",
"origin": "Debian GnuPG Maintainers <pkg-gnupg-maint@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "572000",
"sourcepkg": "libgpg-error",
"type": "dpkg",
"version": "1.26-2",
},
"liblz4-1": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:liblz4-1:liblz4-1:0.0~r131-2+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:liblz4_1:liblz4-1:0.0~r131-2+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:liblz4-1:liblz4_1:0.0~r131-2+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:liblz4_1:liblz4_1:0.0~r131-2+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:liblz4:liblz4-1:0.0~r131-2+b1:*:*:*:*:*:*:*",
"cpe:2.3:a:liblz4:liblz4_1:0.0~r131-2+b1:*:*:*:*:*:*:*",
],
"license": "BSD-2-clause GPL-2 GPL-2+",
"origin": "Nobuhiro Iwamatsu <iwamatsu@debian.org> (maintainer)",
"release": "N/A",
"size": "93000",
"sourcepkg": "lz4",
"type": "dpkg",
"version": "0.0~r131-2+b1",
},
"liblzma5": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:liblzma5:liblzma5:5.2.2-1.2+b1:*:*:*:*:*:*:*"],
"license": "Autoconf GPL-2 GPL-2+ GPL-3 LGPL-2 LGPL-2.1 LGPL-2.1+ PD PD-debian config-h noderivs permissive-fsf permissive-nowarranty probably-PD",
"origin": "Jonathan Nieder <jrnieder@gmail.com> (maintainer)",
"release": "N/A",
"size": "339000",
"sourcepkg": "xz-utils",
"type": "dpkg",
"version": "5.2.2-1.2+b1",
},
"libmount1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libmount1:libmount1:2.29.2-1+deb9u1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "403000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "2.29.2-1+deb9u1",
},
"libncursesw5": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libncursesw5:libncursesw5:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*"
],
"license": "Unknown",
"origin": "Craig Small <csmall@debian.org> (maintainer)",
"release": "N/A",
"size": "347000",
"sourcepkg": "ncurses",
"type": "dpkg",
"version": "6.0+20161126-1+deb9u2",
},
"libpam-modules": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libpam-modules:libpam_modules:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam_modules:libpam_modules:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam-modules:libpam-modules:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam_modules:libpam-modules:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam:libpam_modules:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam:libpam-modules:1.1.8-3.6:*:*:*:*:*:*:*",
],
"license": "GPL",
"origin": "Steve Langasek <vorlon@debian.org> (maintainer)",
"release": "N/A",
"size": "874000",
"sourcepkg": "pam",
"type": "dpkg",
"version": "1.1.8-3.6",
},
"libpam-modules-bin": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libpam-modules-bin:libpam_modules_bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam-modules-bin:libpam-modules-bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam_modules_bin:libpam_modules_bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam_modules_bin:libpam-modules-bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam-modules:libpam_modules_bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam-modules:libpam-modules-bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam_modules:libpam_modules_bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam_modules:libpam-modules-bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam:libpam_modules_bin:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam:libpam-modules-bin:1.1.8-3.6:*:*:*:*:*:*:*",
],
"license": "GPL",
"origin": "Steve Langasek <vorlon@debian.org> (maintainer)",
"release": "N/A",
"size": "220000",
"sourcepkg": "pam",
"type": "dpkg",
"version": "1.1.8-3.6",
},
"libpam-runtime": {
"arch": "all",
"cpes": [
"cpe:2.3:a:libpam_runtime:libpam-runtime:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam-runtime:libpam-runtime:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam_runtime:libpam_runtime:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam-runtime:libpam_runtime:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam:libpam-runtime:1.1.8-3.6:*:*:*:*:*:*:*",
"cpe:2.3:a:libpam:libpam_runtime:1.1.8-3.6:*:*:*:*:*:*:*",
],
"license": "GPL",
"origin": "Steve Langasek <vorlon@debian.org> (maintainer)",
"release": "N/A",
"size": "1016000",
"sourcepkg": "pam",
"type": "dpkg",
"version": "1.1.8-3.6",
},
"libpam0g": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libpam0g:libpam0g:1.1.8-3.6:*:*:*:*:*:*:*"],
"license": "GPL",
"origin": "Steve Langasek <vorlon@debian.org> (maintainer)",
"release": "N/A",
"size": "229000",
"sourcepkg": "pam",
"type": "dpkg",
"version": "1.1.8-3.6",
},
"libpcre3": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libpcre3:libpcre3:2:8.39-3:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "Matthew Vernon <matthew@debian.org> (maintainer)",
"release": "N/A",
"size": "668000",
"sourcepkg": "pcre3",
"type": "dpkg",
"version": "2:8.39-3",
},
"libselinux1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libselinux1:libselinux1:2.6-3+b3:*:*:*:*:*:*:*"],
"license": "GPL-2 LGPL-2.1",
"origin": "Debian SELinux maintainers <selinux-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "209000",
"sourcepkg": "libselinux",
"type": "dpkg",
"version": "2.6-3+b3",
},
"libsemanage-common": {
"arch": "all",
"cpes": [
"cpe:2.3:a:libsemanage-common:libsemanage-common:2.6-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libsemanage_common:libsemanage-common:2.6-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libsemanage-common:libsemanage_common:2.6-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libsemanage_common:libsemanage_common:2.6-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libsemanage:libsemanage-common:2.6-2:*:*:*:*:*:*:*",
"cpe:2.3:a:libsemanage:libsemanage_common:2.6-2:*:*:*:*:*:*:*",
],
"license": "GPL LGPL",
"origin": "Debian SELinux maintainers <selinux-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "39000",
"sourcepkg": "libsemanage",
"type": "dpkg",
"version": "2.6-2",
},
"libsemanage1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libsemanage1:libsemanage1:2.6-2:*:*:*:*:*:*:*"],
"license": "GPL LGPL",
"origin": "Debian SELinux maintainers <selinux-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "291000",
"sourcepkg": "libsemanage",
"type": "dpkg",
"version": "2.6-2",
},
"libsepol1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libsepol1:libsepol1:2.6-2:*:*:*:*:*:*:*"],
"license": "GPL LGPL",
"origin": "Debian SELinux maintainers <selinux-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "653000",
"sourcepkg": "libsepol",
"type": "dpkg",
"version": "2.6-2",
},
"libsmartcols1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libsmartcols1:libsmartcols1:2.29.2-1+deb9u1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "257000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "2.29.2-1+deb9u1",
},
"libss2": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libss2:libss2:1.43.4-2+deb9u2:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "Theodore Y. Ts'o <tytso@mit.edu> (maintainer)",
"release": "N/A",
"size": "96000",
"sourcepkg": "e2fsprogs",
"type": "dpkg",
"version": "1.43.4-2+deb9u2",
},
"libstdc++6": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libstdc++6:libstdc++6:6.3.0-18+deb9u1:*:*:*:*:*:*:*"],
"license": "Artistic GFDL-1.2 GPL GPL-2 GPL-3",
"origin": "Debian GCC Maintainers <debian-gcc@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "1998000",
"sourcepkg": "gcc-6",
"type": "dpkg",
"version": "6.3.0-18+deb9u1",
},
"libsystemd0": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libsystemd0:libsystemd0:232-25+deb9u12:*:*:*:*:*:*:*"],
"license": "CC0 Expat GPL-2 GPL-2+ LGPL-2.1 LGPL-2.1+ public-domain",
"origin": "Debian systemd Maintainers <pkg-systemd-maintainers@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "654000",
"sourcepkg": "systemd",
"type": "dpkg",
"version": "232-25+deb9u12",
},
"libtinfo5": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libtinfo5:libtinfo5:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "Craig Small <csmall@debian.org> (maintainer)",
"release": "N/A",
"size": "478000",
"sourcepkg": "ncurses",
"type": "dpkg",
"version": "6.0+20161126-1+deb9u2",
},
"libudev1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libudev1:libudev1:232-25+deb9u12:*:*:*:*:*:*:*"],
"license": "CC0 Expat GPL-2 GPL-2+ LGPL-2.1 LGPL-2.1+ public-domain",
"origin": "Debian systemd Maintainers <pkg-systemd-maintainers@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "224000",
"sourcepkg": "systemd",
"type": "dpkg",
"version": "232-25+deb9u12",
},
"libustr-1.0-1": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:libustr-1.0-1:libustr_1.0_1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr-1.0-1:libustr-1.0-1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr_1.0_1:libustr_1.0_1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr_1.0_1:libustr-1.0-1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr-1.0:libustr_1.0_1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr-1.0:libustr-1.0-1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr_1.0:libustr_1.0_1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr_1.0:libustr-1.0-1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr:libustr_1.0_1:1.0.4-6:*:*:*:*:*:*:*",
"cpe:2.3:a:libustr:libustr-1.0-1:1.0.4-6:*:*:*:*:*:*:*",
],
"license": "BSD-2-clause GPL-2 GPL-2+ LGPL-2+ LGPL-2.1 MIT",
"origin": "Vaclav Ovsik <vaclav.ovsik@i.cz> (maintainer)",
"release": "N/A",
"size": "258000",
"sourcepkg": "ustr",
"type": "dpkg",
"version": "1.0.4-6",
},
"libuuid1": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:libuuid1:libuuid1:2.29.2-1+deb9u1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "107000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "2.29.2-1+deb9u1",
},
"login": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:login:login:1:4.4-4.1:*:*:*:*:*:*:*"],
"license": "GPL-2",
"origin": "Shadow package maintainers <pkg-shadow-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "2747000",
"sourcepkg": "shadow",
"type": "dpkg",
"version": "1:4.4-4.1",
},
"lsb-base": {
"arch": "all",
"cpes": [
"cpe:2.3:a:lsb-base:lsb-base:9.20161125:*:*:*:*:*:*:*",
"cpe:2.3:a:lsb_base:lsb-base:9.20161125:*:*:*:*:*:*:*",
"cpe:2.3:a:lsb-base:lsb_base:9.20161125:*:*:*:*:*:*:*",
"cpe:2.3:a:lsb_base:lsb_base:9.20161125:*:*:*:*:*:*:*",
"cpe:2.3:a:lsb:lsb-base:9.20161125:*:*:*:*:*:*:*",
"cpe:2.3:a:lsb:lsb_base:9.20161125:*:*:*:*:*:*:*",
],
"license": "BSD-3-clause GPL-2",
"origin": "Debian LSB Team <debian-lsb@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "49000",
"sourcepkg": "lsb",
"type": "dpkg",
"version": "9.20161125",
},
"mawk": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:mawk:mawk:1.3.3-17+b3:*:*:*:*:*:*:*"],
"license": "GPL-2",
"origin": "Steve Langasek <vorlon@debian.org> (maintainer)",
"release": "N/A",
"size": "183000",
"sourcepkg": "mawk",
"type": "dpkg",
"version": "1.3.3-17+b3",
},
"mount": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:mount:mount:2.29.2-1+deb9u1:*:*:*:*:*:*:*"],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "444000",
"sourcepkg": "util-linux",
"type": "dpkg",
"version": "2.29.2-1+deb9u1",
},
"multiarch-support": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:multiarch-support:multiarch-support:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:multiarch_support:multiarch-support:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:multiarch-support:multiarch_support:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:multiarch_support:multiarch_support:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:multiarch:multiarch-support:2.24-11+deb9u4:*:*:*:*:*:*:*",
"cpe:2.3:a:multiarch:multiarch_support:2.24-11+deb9u4:*:*:*:*:*:*:*",
],
"license": "GPL-2 LGPL-2.1",
"origin": "GNU Libc Maintainers <debian-glibc@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "221000",
"sourcepkg": "glibc",
"type": "dpkg",
"version": "2.24-11+deb9u4",
},
"ncurses-base": {
"arch": "all",
"cpes": [
"cpe:2.3:a:ncurses-base:ncurses-base:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses_base:ncurses-base:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses-base:ncurses_base:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses_base:ncurses_base:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses:ncurses-base:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses:ncurses_base:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
],
"license": "Unknown",
"origin": "Craig Small <csmall@debian.org> (maintainer)",
"release": "N/A",
"size": "340000",
"sourcepkg": "ncurses",
"type": "dpkg",
"version": "6.0+20161126-1+deb9u2",
},
"ncurses-bin": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:ncurses-bin:ncurses-bin:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses_bin:ncurses-bin:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses-bin:ncurses_bin:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses_bin:ncurses_bin:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses:ncurses-bin:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
"cpe:2.3:a:ncurses:ncurses_bin:6.0+20161126-1+deb9u2:*:*:*:*:*:*:*",
],
"license": "Unknown",
"origin": "Craig Small <csmall@debian.org> (maintainer)",
"release": "N/A",
"size": "536000",
"sourcepkg": "ncurses",
"type": "dpkg",
"version": "6.0+20161126-1+deb9u2",
},
"passwd": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:passwd:passwd:1:4.4-4.1:*:*:*:*:*:*:*"],
"license": "GPL-2",
"origin": "Shadow package maintainers <pkg-shadow-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "2478000",
"sourcepkg": "shadow",
"type": "dpkg",
"version": "1:4.4-4.1",
},
"perl-base": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:perl-base:perl-base:5.24.1-3+deb9u7:*:*:*:*:*:*:*",
"cpe:2.3:a:perl_base:perl-base:5.24.1-3+deb9u7:*:*:*:*:*:*:*",
"cpe:2.3:a:perl-base:perl_base:5.24.1-3+deb9u7:*:*:*:*:*:*:*",
"cpe:2.3:a:perl_base:perl_base:5.24.1-3+deb9u7:*:*:*:*:*:*:*",
"cpe:2.3:a:perl:perl-base:5.24.1-3+deb9u7:*:*:*:*:*:*:*",
"cpe:2.3:a:perl:perl_base:5.24.1-3+deb9u7:*:*:*:*:*:*:*",
],
"license": "Artistic Artistic-2 BSD-3-clause BSD-3-clause-GENERIC BSD-3-clause-with-weird-numbering BSD-4-clause-POWERDOG BZIP DONT-CHANGE-THE-GPL Expat GPL-1 GPL-1+ GPL-2 GPL-2+ GPL-3+-WITH-BISON-EXCEPTION HSIEH-BSD HSIEH-DERIVATIVE LGPL-2.1 REGCOMP REGCOMP, RRA-KEEP-THIS-NOTICE S2P SDBM-PUBLIC-DOMAIN TEXT-TABS Unicode ZLIB",
"origin": "Niko Tyni <ntyni@debian.org> (maintainer)",
"release": "N/A",
"size": "7551000",
"sourcepkg": "perl",
"type": "dpkg",
"version": "5.24.1-3+deb9u7",
},
"sed": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:sed:sed:4.4-1:*:*:*:*:*:*:*"],
"license": "GPL-3",
"origin": "Clint Adams <clint@debian.org> (maintainer)",
"release": "N/A",
"size": "799000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "4.4-1",
},
"sensible-utils": {
"arch": "all",
"cpes": [
"cpe:2.3:a:sensible-utils:sensible-utils:0.0.9+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:sensible_utils:sensible-utils:0.0.9+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:sensible-utils:sensible_utils:0.0.9+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:sensible_utils:sensible_utils:0.0.9+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:sensible:sensible-utils:0.0.9+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:sensible:sensible_utils:0.0.9+deb9u1:*:*:*:*:*:*:*",
],
"license": "GPL-2",
"origin": "Anibal Monsalve Salazar <anibal@debian.org> (maintainer)",
"release": "N/A",
"size": "62000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "0.0.9+deb9u1",
},
"sysvinit-utils": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:sysvinit-utils:sysvinit-utils:2.88dsf-59.9:*:*:*:*:*:*:*",
"cpe:2.3:a:sysvinit_utils:sysvinit-utils:2.88dsf-59.9:*:*:*:*:*:*:*",
"cpe:2.3:a:sysvinit-utils:sysvinit_utils:2.88dsf-59.9:*:*:*:*:*:*:*",
"cpe:2.3:a:sysvinit_utils:sysvinit_utils:2.88dsf-59.9:*:*:*:*:*:*:*",
"cpe:2.3:a:sysvinit:sysvinit-utils:2.88dsf-59.9:*:*:*:*:*:*:*",
"cpe:2.3:a:sysvinit:sysvinit_utils:2.88dsf-59.9:*:*:*:*:*:*:*",
],
"license": "GPL-2",
"origin": "Debian sysvinit maintainers <pkg-sysvinit-devel@lists.alioth.debian.org> (maintainer)",
"release": "N/A",
"size": "110000",
"sourcepkg": "sysvinit",
"type": "dpkg",
"version": "2.88dsf-59.9",
},
"tar": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:tar:tar:1.29b-1.1:*:*:*:*:*:*:*"],
"license": "GPL-2 GPL-3",
"origin": "Bdale Garbee <bdale@gag.com> (maintainer)",
"release": "N/A",
"size": "2770000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "1.29b-1.1",
},
"tzdata": {
"arch": "all",
"cpes": ["cpe:2.3:a:tzdata:tzdata:2020a-0+deb9u1:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "GNU Libc Maintainers <debian-glibc@lists.debian.org> (maintainer)",
"release": "N/A",
"size": "3032000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "2020a-0+deb9u1",
},
"util-linux": {
"arch": "amd64",
"cpes": [
"cpe:2.3:a:util_linux:util-linux:2.29.2-1+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:util-linux:util-linux:2.29.2-1+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:util_linux:util_linux:2.29.2-1+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:util-linux:util_linux:2.29.2-1+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:util:util-linux:2.29.2-1+deb9u1:*:*:*:*:*:*:*",
"cpe:2.3:a:util:util_linux:2.29.2-1+deb9u1:*:*:*:*:*:*:*",
],
"license": "BSD-2-clause BSD-3-clause BSD-4-clause GPL-2 GPL-2+ GPL-3 GPL-3+ LGPL LGPL-2 LGPL-2+ LGPL-2.1 LGPL-2.1+ LGPL-3 LGPL-3+ MIT public-domain",
"origin": "Debian util-linux Maintainers <ah-util-linux@debian.org> (maintainer)",
"release": "N/A",
"size": "3558000",
"sourcepkg": "N/A",
"type": "dpkg",
"version": "2.29.2-1+deb9u1",
},
"zlib1g": {
"arch": "amd64",
"cpes": ["cpe:2.3:a:zlib1g:zlib1g:1:1.2.8.dfsg-5:*:*:*:*:*:*:*"],
"license": "Unknown",
"origin": "Mark Brown <broonie@debian.org> (maintainer)",
"release": "N/A",
"size": "156000",
"sourcepkg": "zlib",
"type": "dpkg",
"version": "1:1.2.8.dfsg-5",
},
}
# Mapping of Debian package name -> installed version string.
# NOTE(review): presumably an expected-results fixture for packages that also
# report a source package — confirm against the tests that consume it.
pkgs_plus_source_all = {
    "util-linux": "2.29.2-1+deb9u1",
    "debianutils": "4.8.1.1",
    "zlib1g": "1:1.2.8.dfsg-5",
    "libtinfo5": "6.0+20161126-1+deb9u2",
    "libgcc1": "1:6.3.0-18+deb9u1",
    "libpam-modules-bin": "1.1.8-3.6",
    "attr": "1:2.4.47-2",
    "lsb": "9.20161125",
    "libcomerr2": "1.43.4-2+deb9u2",
    "libsemanage": "2.6-2",
}
| |
# match.py - filename matching
#
# Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
import re
import scmutil, util, fileset
from i18n import _
def _rematcher(pat):
    """Compile *pat* and return the fastest matching callable available."""
    compiled = util.compilere(pat)
    try:
        # re2 bindings (when installed) expose test_match, which is
        # slightly faster than the plain match method
        return compiled.test_match
    except AttributeError:
        return compiled.match
def _expandsets(pats, ctx):
    '''convert set: patterns into a list of files in the given context'''
    fset = set()
    remaining = []
    for kind, expr in pats:
        if kind != 'set':
            remaining.append((kind, expr))
            continue
        # 'set:' patterns need a changectx to evaluate the fileset against
        if not ctx:
            raise util.Abort("fileset expression with no context")
        fset.update(fileset.getfileset(ctx, expr))
    return fset, remaining
class match(object):
    def __init__(self, root, cwd, patterns, include=[], exclude=[],
                 default='glob', exact=False, auditor=None, ctx=None):
        """build an object to match a set of file patterns
        arguments:
        root - the canonical root of the tree you're matching against
        cwd - the current working directory, if relevant
        patterns - patterns to find
        include - patterns to include
        exclude - patterns to exclude
        default - if a pattern in names has no explicit type, assume this one
        exact - patterns are actually literals
        a pattern is one of:
        'glob:<glob>' - a glob relative to cwd
        're:<regexp>' - a regular expression
        'path:<path>' - a path relative to repository root
        'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
        'relpath:<path>' - a path relative to cwd
        'relre:<regexp>' - a regexp that needn't match the start of a name
        'set:<fileset>' - a fileset expression
        '<something>' - a pattern of the specified default type
        """
        # NOTE(review): include/exclude are mutable default arguments; they
        # are only read in this method (never mutated), so sharing the
        # default list across calls is safe as written.
        self._root = root
        self._cwd = cwd
        self._files = []
        # True when any non-literal (wildcard-style) pattern is in play;
        # refined below once `patterns` has been normalized
        self._anypats = bool(include or exclude)
        self._ctx = ctx
        self._always = False
        if include:
            # includes must match whole path components, hence '(?:/|$)'
            pats = _normalize(include, 'glob', root, cwd, auditor)
            self.includepat, im = _buildmatch(ctx, pats, '(?:/|$)')
        if exclude:
            pats = _normalize(exclude, 'glob', root, cwd, auditor)
            self.excludepat, em = _buildmatch(ctx, pats, '(?:/|$)')
        if exact:
            # patterns are literal file names; match by set membership
            if isinstance(patterns, list):
                self._files = patterns
            else:
                self._files = list(patterns)
            pm = self.exact
        elif patterns:
            pats = _normalize(patterns, default, root, cwd, auditor)
            self._files = _roots(pats)
            self._anypats = self._anypats or _anypats(pats)
            self.patternspat, pm = _buildmatch(ctx, pats, '$')
        # compose the final match function from the pattern matcher (pm)
        # and the optional include (im) / exclude (em) matchers built above
        if patterns or exact:
            if include:
                if exclude:
                    m = lambda f: im(f) and not em(f) and pm(f)
                else:
                    m = lambda f: im(f) and pm(f)
            else:
                if exclude:
                    m = lambda f: not em(f) and pm(f)
                else:
                    m = pm
        else:
            if include:
                if exclude:
                    m = lambda f: im(f) and not em(f)
                else:
                    m = im
            else:
                if exclude:
                    m = lambda f: not em(f)
                else:
                    # no patterns, includes or excludes: match everything
                    m = lambda f: True
                    self._always = True
        self.matchfn = m
        self._fmap = set(self._files)

    def __call__(self, fn):
        # a matcher is directly callable on a file name
        return self.matchfn(fn)

    def __iter__(self):
        # iterate the explicit files/roots derived from the patterns
        for f in self._files:
            yield f

    def bad(self, f, msg):
        '''callback for each explicit file that can't be
        found/accessed, with an error message
        '''
        pass

    def dir(self, f):
        # callback for each visited directory (no-op by default)
        pass

    def missing(self, f):
        # callback for each missing explicit file (no-op by default)
        pass

    def exact(self, f):
        # True if f was supplied as an exact (literal) file name
        return f in self._fmap

    def rel(self, f):
        # f as a path relative to the current working directory
        return util.pathto(self._root, self._cwd, f)

    def files(self):
        return self._files

    def anypats(self):
        return self._anypats

    def always(self):
        return self._always
class exact(match):
    """Matcher whose patterns are literal file names (no wildcard logic)."""
    def __init__(self, root, cwd, files):
        match.__init__(self, root, cwd, files, exact=True)
class always(match):
    """Matcher built with no patterns at all: it accepts every file."""
    def __init__(self, root, cwd):
        super(always, self).__init__(root, cwd, [])
        self._always = True
class narrowmatcher(match):
    """Adapt a matcher to work on a subdirectory only.
    The paths are remapped to remove/insert the path as needed:
    >>> m1 = match('root', '', ['a.txt', 'sub/b.txt'])
    >>> m2 = narrowmatcher('sub', m1)
    >>> bool(m2('a.txt'))
    False
    >>> bool(m2('b.txt'))
    True
    >>> bool(m2.matchfn('a.txt'))
    False
    >>> bool(m2.matchfn('b.txt'))
    True
    >>> m2.files()
    ['b.txt']
    >>> m2.exact('b.txt')
    True
    >>> m2.rel('b.txt')
    'b.txt'
    >>> def bad(f, msg):
    ...     print "%s: %s" % (f, msg)
    >>> m1.bad = bad
    >>> m2.bad('x.txt', 'No such file')
    sub/x.txt: No such file
    """
    def __init__(self, path, matcher):
        # deliberately does NOT call match.__init__: it copies the relevant
        # state from the wrapped matcher instead of rebuilding patterns
        self._root = matcher._root
        self._cwd = matcher._cwd
        self._path = path
        self._matcher = matcher
        self._always = matcher._always
        # keep only files under path/ and strip the "path/" prefix
        self._files = [f[len(path) + 1:] for f in matcher._files
                       if f.startswith(path + "/")]
        self._anypats = matcher._anypats
        # delegate matching to the wrapped matcher after re-prefixing
        self.matchfn = lambda fn: matcher.matchfn(self._path + "/" + fn)
        self._fmap = set(self._files)

    def bad(self, f, msg):
        # report errors against the full (re-prefixed) path
        self._matcher.bad(self._path + "/" + f, msg)
def patkind(pat):
    """Return the kind prefix of *pat* ('glob', 're', ...) or None."""
    kind, _rest = _patsplit(pat, None)
    return kind
def _patsplit(pat, default):
"""Split a string into an optional pattern kind prefix and the
actual pattern."""
if ':' in pat:
kind, val = pat.split(':', 1)
if kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre',
'listfile', 'listfile0', 'set'):
return kind, val
return default, pat
def _globre(pat):
    "convert a glob pattern into a regexp"
    i, n = 0, len(pat)
    res = ''
    group = 0          # nesting depth of {a,b} alternation groups
    escape = re.escape
    def peek():
        # next character, or False at end of pattern (reads outer i/n)
        return i < n and pat[i]
    while i < n:
        c = pat[i]
        i += 1
        if c not in '*?[{},\\':
            # ordinary character: escape any regex metacharacter
            res += escape(c)
        elif c == '*':
            if peek() == '*':
                # '**' also crosses directory separators
                i += 1
                res += '.*'
            else:
                # single '*' stops at '/'
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # character class: scan ahead for the closing ']'
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' as a literal
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation -> regex class negation
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # a literal leading '^' must be escaped
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            group += 1
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group -= 1
        elif c == ',' and group:
            # ',' separates alternatives only inside a {} group
            res += '|'
        elif c == '\\':
            p = peek()
            if p:
                # backslash escapes the following character
                i += 1
                res += escape(p)
            else:
                # trailing backslash: escape the backslash itself
                res += escape(c)
        else:
            # '}' or ',' outside a group are literals
            res += escape(c)
    return res
def _regex(kind, name, tail):
'''convert a pattern into a regular expression'''
if not name:
return ''
if kind == 're':
return name
elif kind == 'path':
return '^' + re.escape(name) + '(?:/|$)'
elif kind == 'relglob':
return '(?:|.*/)' + _globre(name) + tail
elif kind == 'relpath':
return re.escape(name) + '(?:/|$)'
elif kind == 'relre':
if name.startswith('^'):
return name
return '.*' + name
return _globre(name) + tail
def _buildmatch(ctx, pats, tail):
    """Return (regex string, match function), expanding 'set:' patterns."""
    fset, pats = _expandsets(pats, ctx)
    if not pats:
        # only filesets: membership test is the whole matcher
        return "", fset.__contains__
    pat, mf = _buildregexmatch(pats, tail)
    if not fset:
        return pat, mf
    return pat, lambda f: f in fset or mf(f)
def _buildregexmatch(pats, tail):
    """build a matching function from a set of patterns"""
    try:
        # join all patterns into one big alternation
        pat = '(?:%s)' % '|'.join([_regex(k, p, tail) for (k, p) in pats])
        if len(pat) > 20000:
            raise OverflowError
        return pat, _rematcher(pat)
    except OverflowError:
        # We're using a Python with a tiny regex engine and we
        # made it explode, so we'll divide the pattern list in two
        # until it works
        l = len(pats)
        if l < 2:
            raise
        pata, a = _buildregexmatch(pats[:l//2], tail)
        patb, b = _buildregexmatch(pats[l//2:], tail)
        # NOTE(review): the `pat` returned here is the over-long combined
        # pattern (not pata/patb); it is informational only — TODO confirm
        # no caller tries to recompile it.
        return pat, lambda s: a(s) or b(s)
    except re.error:
        # recompile each pattern individually to identify the culprit
        for k, p in pats:
            try:
                _rematcher('(?:%s)' % _regex(k, p, tail))
            except re.error:
                raise util.Abort(_("invalid pattern (%s): %s") % (k, p))
        raise util.Abort(_("invalid pattern"))
def _normalize(names, default, root, cwd, auditor):
    """Split each name into (kind, pattern) and canonicalize the pattern."""
    pats = []
    for p in names:
        kind, name = _patsplit(p, default)
        if kind in ('glob', 'relpath'):
            # cwd-relative: canonicalize against the repository root
            name = scmutil.canonpath(root, cwd, name, auditor)
        elif kind in ('relglob', 'path'):
            name = util.normpath(name)
        elif kind in ('listfile', 'listfile0'):
            # the pattern names a file that itself contains patterns
            try:
                files = util.readfile(name)
                if kind == 'listfile0':
                    files = files.split('\0')
                else:
                    files = files.splitlines()
                files = [f for f in files if f]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % name)
            pats += _normalize(files, default, root, cwd, auditor)
            continue
        pats.append((kind, name))
    return pats
def _roots(patterns):
r = []
for kind, name in patterns:
if kind == 'glob': # find the non-glob prefix
root = []
for p in name.split('/'):
if '[' in p or '{' in p or '*' in p or '?' in p:
break
root.append(p)
r.append('/'.join(root) or '.')
elif kind in ('relpath', 'path'):
r.append(name or '.')
else: # relglob, re, relre
r.append('.')
return r
def _anypats(patterns):
for kind, name in patterns:
if kind in ('glob', 're', 'relglob', 'relre', 'set'):
return True
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains functions for reading and writing QDP tables that are
not meant to be used directly, but instead are available as readers/writers in
`astropy.table`. See :ref:`astropy:table_io` for more details.
"""
import re
import copy
from collections.abc import Iterable
import numpy as np
import warnings
from astropy.utils.exceptions import AstropyUserWarning
from astropy.table import Table
from . import core, basic
def _line_type(line, delimiter=None):
"""Interpret a QDP file line
Parameters
----------
line : str
a single line of the file
Returns
-------
type : str
Line type: "comment", "command", or "data"
Examples
--------
>>> _line_type("READ SERR 3")
'command'
>>> _line_type(" \\n !some gibberish")
'comment'
>>> _line_type(" ")
'comment'
>>> _line_type(" 21345.45")
'data,1'
>>> _line_type(" 21345.45 1.53e-3 1e-3 .04 NO nan")
'data,6'
>>> _line_type(" 21345.45,1.53e-3,1e-3,.04,NO,nan", delimiter=',')
'data,6'
>>> _line_type(" 21345.45 ! a comment to disturb")
'data,1'
>>> _line_type("NO NO NO NO NO")
'new'
>>> _line_type("NO,NO,NO,NO,NO", delimiter=',')
'new'
>>> _line_type("N O N NOON OON O")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
>>> _line_type(" some non-comment gibberish")
Traceback (most recent call last):
...
ValueError: Unrecognized QDP line...
"""
_decimal_re = r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?'
_command_re = r'READ [TS]ERR(\s+[0-9]+)+'
sep = delimiter
if delimiter is None:
sep = r'\s+'
_new_re = rf'NO({sep}NO)+'
_data_re = rf'({_decimal_re}|NO|[-+]?nan)({sep}({_decimal_re}|NO|[-+]?nan))*)'
_type_re = rf'^\s*((?P<command>{_command_re})|(?P<new>{_new_re})|(?P<data>{_data_re})?\s*(\!(?P<comment>.*))?\s*$'
_line_type_re = re.compile(_type_re)
line = line.strip()
if not line:
return 'comment'
match = _line_type_re.match(line)
if match is None:
raise ValueError(f'Unrecognized QDP line: {line}')
for type_, val in match.groupdict().items():
if val is None:
continue
if type_ == 'data':
return f'data,{len(val.split(sep=delimiter))}'
else:
return type_
def _get_type_from_list_of_lines(lines, delimiter=None):
    """Read through the list of QDP file lines and label each line by type

    Parameters
    ----------
    lines : list
        List containing one file line in each entry

    Returns
    -------
    contents : list
        List containing the type for each line (see `line_type_and_data`)
    ncol : int
        The number of columns in the data lines. Must be the same throughout
        the file

    Examples
    --------
    >>> line0 = "! A comment"
    >>> line1 = "543 12 456.0"
    >>> lines = [line0, line1]
    >>> types, ncol = _get_type_from_list_of_lines(lines)
    >>> types[0]
    'comment'
    >>> types[1]
    'data,3'
    >>> ncol
    3
    >>> lines.append("23")
    >>> _get_type_from_list_of_lines(lines)
    Traceback (most recent call last):
        ...
    ValueError: Inconsistent number of columns
    """
    types = [_line_type(line, delimiter=delimiter) for line in lines]
    current_ncol = None
    for type_ in types:
        if not type_.startswith('data'):
            continue
        # 'data,N' encodes the column count after the comma
        ncol = int(type_[5:])
        if current_ncol is None:
            current_ncol = ncol
        elif ncol != current_ncol:
            raise ValueError('Inconsistent number of columns')
    return types, current_ncol
def _get_lines_from_file(qdp_file):
if "\n" in qdp_file:
lines = qdp_file.split("\n")
elif isinstance(qdp_file, str):
with open(qdp_file) as fobj:
lines = [line.strip() for line in fobj.readlines()]
elif isinstance(qdp_file, Iterable):
lines = qdp_file
return lines
def _interpret_err_lines(err_specs, ncols, names=None):
"""Give list of column names from the READ SERR and TERR commands
Parameters
----------
err_specs : dict
``{'serr': [n0, n1, ...], 'terr': [n2, n3, ...]}``
Error specifications for symmetric and two-sided errors
ncols : int
Number of data columns
Other parameters
----------------
names : list of str
Name of data columns (defaults to ['col1', 'col2', ...]), _not_
including error columns.
Returns
-------
colnames : list
List containing the column names. Error columns will have the name
of the main column plus ``_err`` for symmetric errors, and ``_perr``
and ``_nerr`` for positive and negative errors respectively
Examples
--------
>>> col_in = ['MJD', 'Rate']
>>> cols = _interpret_err_lines(None, 2, names=col_in)
>>> cols[0]
'MJD'
>>> err_specs = {'terr': [1], 'serr': [2]}
>>> ncols = 5
>>> cols = _interpret_err_lines(err_specs, ncols, names=col_in)
>>> cols[0]
'MJD'
>>> cols[2]
'MJD_nerr'
>>> cols[4]
'Rate_err'
>>> _interpret_err_lines(err_specs, 6, names=col_in)
Traceback (most recent call last):
...
ValueError: Inconsistent number of input colnames
"""
colnames = ["" for i in range(ncols)]
if err_specs is None:
serr_cols = terr_cols = []
else:
# I don't want to empty the original one when using `pop` below
err_specs = copy.deepcopy(err_specs)
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if names is not None:
all_error_cols = len(serr_cols) + len(terr_cols) * 2
if all_error_cols + len(names) != ncols:
raise ValueError("Inconsistent number of input colnames")
shift = 0
for i in range(ncols):
col_num = i + 1 - shift
if colnames[i] != "":
continue
colname_root = f"col{col_num}"
if names is not None:
colname_root = names[col_num - 1]
colnames[i] = f"{colname_root}"
if col_num in serr_cols:
colnames[i + 1] = f"{colname_root}_err"
shift += 1
continue
if col_num in terr_cols:
colnames[i + 1] = f"{colname_root}_perr"
colnames[i + 2] = f"{colname_root}_nerr"
shift += 2
continue
assert not np.any([c == "" for c in colnames])
return colnames
def _get_tables_from_qdp_file(qdp_file, input_colnames=None, delimiter=None):
    """Get all tables from a QDP file

    Parameters
    ----------
    qdp_file : str
        Input QDP file name

    Other parameters
    ----------------
    input_colnames : list of str
        Name of data columns (defaults to ['col1', 'col2', ...]), _not_
        including error columns.
    delimiter : str
        Delimiter for the values in the table.

    Returns
    -------
    list of `~astropy.table.Table`
        List containing all the tables present inside the QDP file
    """
    lines = _get_lines_from_file(qdp_file)
    contents, ncol = _get_type_from_list_of_lines(lines, delimiter=delimiter)
    table_list = []
    err_specs = {}          # parsed READ SERR/TERR specs, e.g. {'serr': [1]}
    colnames = None
    comment_text = ""       # comments accumulated since the last table break
    initial_comments = ""   # comments that precede the first command block
    command_lines = ""
    current_rows = None     # rows of the table currently being parsed
    for line, datatype in zip(lines, contents):
        line = line.strip().lstrip('!')
        # Is this a comment?
        if datatype == "comment":
            comment_text += line + '\n'
            continue
        if datatype == "command":
            # The first time I find commands, I save whatever comments into
            # The initial comments.
            if command_lines == "":
                initial_comments = comment_text
                comment_text = ""
            if err_specs != {}:
                warnings.warn(
                    "This file contains multiple command blocks. Please verify",
                    AstropyUserWarning
                )
            command_lines += line + '\n'
            continue
        if datatype.startswith("data"):
            # The first time I find data, I define err_specs
            if err_specs == {} and command_lines != "":
                for cline in command_lines.strip().split('\n'):
                    command = cline.strip().split()
                    # This should never happen, but just in case.
                    if len(command) < 3:
                        continue
                    # e.g. "READ SERR 1 2" -> err_specs['serr'] = [1, 2]
                    err_specs[command[1].lower()] = [int(c) for c in
                                                     command[2:]]
            if colnames is None:
                colnames = _interpret_err_lines(
                    err_specs, ncol, names=input_colnames
                )
            if current_rows is None:
                current_rows = []
            values = []
            for v in line.split(delimiter):
                if v == "NO":
                    # NO is the QDP encoding for a missing/masked value
                    values.append(np.ma.masked)
                else:
                    # Understand if number is int or float
                    try:
                        values.append(int(v))
                    except ValueError:
                        values.append(float(v))
            current_rows.append(values)
            continue
        if datatype == "new":
            # A full line of NOs separates tables:
            # save table to table_list and reset
            if current_rows is not None:
                new_table = Table(names=colnames, rows=current_rows)
                new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
                new_table.meta["comments"] = comment_text.strip().split("\n")
                # Reset comments
                comment_text = ""
                table_list.append(new_table)
                current_rows = None
            continue
    # At the very end, if there is still a table being written, let's save
    # it to the table_list
    if current_rows is not None:
        new_table = Table(names=colnames, rows=current_rows)
        new_table.meta["initial_comments"] = initial_comments.strip().split("\n")
        new_table.meta["comments"] = comment_text.strip().split("\n")
        table_list.append(new_table)
    return table_list
def _understand_err_col(colnames):
"""Get which column names are error columns
Examples
--------
>>> colnames = ['a', 'a_err', 'b', 'b_perr', 'b_nerr']
>>> serr, terr = _understand_err_col(colnames)
>>> np.allclose(serr, [1])
True
>>> np.allclose(terr, [2])
True
>>> serr, terr = _understand_err_col(['a', 'a_nerr'])
Traceback (most recent call last):
...
ValueError: Missing positive error...
>>> serr, terr = _understand_err_col(['a', 'a_perr'])
Traceback (most recent call last):
...
ValueError: Missing negative error...
"""
shift = 0
serr = []
terr = []
for i, col in enumerate(colnames):
if col.endswith("_err"):
# The previous column, but they're numbered from 1!
# Plus, take shift into account
serr.append(i - shift)
shift += 1
elif col.endswith("_perr"):
terr.append(i - shift)
if len(colnames) == i + 1 or not colnames[i + 1].endswith('_nerr'):
raise ValueError("Missing negative error")
shift += 2
elif col.endswith("_nerr") and not colnames[i - 1].endswith('_perr'):
raise ValueError("Missing positive error")
return serr, terr
def _read_table_qdp(qdp_file, names=None, table_id=None, delimiter=None):
    """Read a table from a QDP file

    Parameters
    ----------
    qdp_file : str
        Input QDP file name

    Other parameters
    ----------------
    names : list of str
        Name of data columns (defaults to ['col1', 'col2', ...]), _not_
        including error columns.
    table_id : int, default 0
        Number of the table to be read from the QDP file. This is useful
        when multiple tables present in the file. By default, the first is read.
    delimiter : str
        Any delimiter accepted by the `sep` argument of str.split()

    Returns
    -------
    tables : list of `~astropy.table.Table`
        List containing all the tables present inside the QDP file
    """
    if table_id is None:
        # default to the first table, but let the user know about it
        warnings.warn("table_id not specified. Reading the first available "
                      "table", AstropyUserWarning)
        table_id = 0
    tables = _get_tables_from_qdp_file(
        qdp_file, input_colnames=names, delimiter=delimiter)
    return tables[table_id]
def _write_table_qdp(table, filename=None, err_specs=None):
"""Write a table to a QDP file
Parameters
----------
table : :class:`~astropy.table.Table`
Input table to be written
filename : str
Output QDP file name
Other parameters
----------------
err_specs : dict
Dictionary of the format {'serr': [1], 'terr': [2, 3]}, specifying
which columns have symmetric and two-sided errors (see QDP format
specification)
"""
import io
fobj = io.StringIO()
if 'initial_comments' in table.meta and table.meta['initial_comments'] != []:
for line in table.meta['initial_comments']:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
if err_specs is None:
serr_cols, terr_cols = _understand_err_col(table.colnames)
else:
serr_cols = err_specs.pop("serr", [])
terr_cols = err_specs.pop("terr", [])
if serr_cols != []:
col_string = " ".join([str(val) for val in serr_cols])
print(f"READ SERR {col_string}", file=fobj)
if terr_cols != []:
col_string = " ".join([str(val) for val in terr_cols])
print(f"READ TERR {col_string}", file=fobj)
if 'comments' in table.meta and table.meta['comments'] != []:
for line in table.meta['comments']:
line = line.strip()
if not line.startswith("!"):
line = "!" + line
print(line, file=fobj)
colnames = table.colnames
print("!" + " ".join(colnames), file=fobj)
for row in table:
values = []
for val in row:
if not np.ma.is_masked(val):
rep = str(val)
else:
rep = "NO"
values.append(rep)
print(" ".join(values), file=fobj)
full_string = fobj.getvalue()
fobj.close()
if filename is not None:
with open(filename, 'w') as fobj:
print(full_string, file=fobj)
return full_string.split("\n")
class QDPSplitter(core.DefaultSplitter):
    """
    Split on space for QDP tables
    """
    # QDP values are separated by spaces
    delimiter = ' '
class QDPHeader(basic.CommentedHeaderHeader):
    """
    Header that uses the :class:`QDPSplitter` defined in this module
    """
    splitter_class = QDPSplitter
    # QDP comments (including the commented header line) start with '!'
    comment = "!"
    write_comment = "!"
class QDPData(basic.BasicData):
    """
    Data that uses the :class:`QDPSplitter` defined in this module
    """
    splitter_class = QDPSplitter
    # masked values are represented by 'NO' in QDP files
    fill_values = [(core.masked, 'NO')]
    comment = "!"
    write_comment = None
class QDP(basic.Basic):
    """Quick and Dandy Plot table.

    Example::

        ! Initial comment line 1
        ! Initial comment line 2
        READ TERR 1
        READ SERR 3
        ! Table 0 comment
        !a a(pos) a(neg) b be c d
        53000.5 0.25 -0.5 1 1.5 3.5 2
        54000.5 1.25 -1.5 2 2.5 4.5 3
        NO NO NO NO NO
        ! Table 1 comment
        !a a(pos) a(neg) b be c d
        54000.5 2.25 -2.5 NO 3.5 5.5 5
        55000.5 3.25 -3.5 4 4.5 6.5 nan

    The input table above contains some initial comments, the error commands,
    then two tables.
    This file format can contain multiple tables, separated by a line full
    of ``NO``s. Comments are exclamation marks, and missing values are single
    ``NO`` entries. The delimiter is usually whitespace, more rarely a comma.
    The QDP format differentiates between data and error columns. The table
    above has commands::

        READ TERR 1
        READ SERR 3

    which mean that after data column 1 there will be two error columns
    containing its positive and negative error bars, then data column 2 without
    error bars, then column 3, then a column with the symmetric error of column
    3, then the remaining data columns.
    As explained below, table headers are highly inconsistent. Possible
    comments containing column names will be ignored and columns will be called
    ``col1``, ``col2``, etc. unless the user specifies their names with the
    ``names=`` keyword argument.
    When passing column names, pass **only the names of the data columns, not
    the error columns.**
    Error information will be encoded in the names of the table columns.
    (e.g. ``a_perr`` and ``a_nerr`` for the positive and negative error of
    column ``a``, ``b_err`` the symmetric error of column ``b``.)
    When writing tables to this format, users can pass an ``err_specs`` keyword
    passing a dictionary ``{'serr': [3], 'terr': [1, 2]}``, meaning that data
    columns 1 and 2 will have two additional columns each with their positive
    and negative errors, and data column 3 will have an additional column with
    a symmetric error (just like the ``READ SERR`` and ``READ TERR`` commands
    above)
    Headers are just comments, and tables distributed by various missions
    can differ greatly in their use of conventions. For example, light curves
    distributed by the Swift-Gehrels mission have an extra space in one header
    entry that makes the number of labels inconsistent with the number of cols.
    For this reason, we ignore the comments that might encode the column names
    and leave the name specification to the user.
    Example::

        > Extra space
        > |
        > v
        >! MJD Err (pos) Err(neg) Rate Error
        >53000.123456 2.378e-05 -2.378472e-05 NO 0.212439

    These readers and writer classes will strive to understand which of the
    comments belong to all the tables, and which ones to each single table.
    General comments will be stored in the ``initial_comments`` meta of each
    table. The comments of each table will be stored in the ``comments`` meta.
    Example::

        t = Table.read(example_qdp, format='ascii.qdp', table_id=1, names=['a', 'b', 'c', 'd'])

    reads the second table (``table_id=1``) in file ``example.qdp`` containing
    the table above. There are four column names but seven data columns, why?
    Because the ``READ SERR`` and ``READ TERR`` commands say that there are
    three error columns.
    ``t.meta['initial_comments']`` will contain the initial two comment lines
    in the file, while ``t.meta['comments']`` will contain ``Table 1 comment``
    The table can be written to another file, preserving the same information,
    as::

        t.write(test_file, err_specs={'terr': [1], 'serr': [3]})

    Note how the ``terr`` and ``serr`` commands are passed to the writer.
    """
    _format_name = 'qdp'
    _io_registry_can_write = True
    _io_registry_suffix = '.qdp'
    _description = 'Quick and Dandy Plotter'
    header_class = QDPHeader
    data_class = QDPData

    def __init__(self, table_id=None, names=None, err_specs=None, sep=None):
        # sep=None means "any whitespace" (the usual QDP delimiter)
        super().__init__()
        self.table_id = table_id
        self.names = names
        self.err_specs = err_specs
        self.delimiter = sep

    def read(self, table):
        # split the input into lines, then delegate to the module-level reader
        self.lines = self.inputter.get_lines(table, newline="\n")
        return _read_table_qdp(self.lines, table_id=self.table_id,
                               names=self.names, delimiter=self.delimiter)

    def write(self, table):
        # QDP cannot represent multidimensional columns
        self._check_multidim_table(table)
        lines = _write_table_qdp(table, err_specs=self.err_specs)
        return lines
| |
# -*- coding: utf-8 -*-
"""
test_models
-----------
Tests for `django-gcse` model module.
"""
from __future__ import unicode_literals
from io import StringIO
from lxml import etree as ET
import mock
from django.db import IntegrityError
from django.test import TestCase
from django.test.utils import override_settings
from gcse.models import CustomSearchEngine, CSESAXHandler, Label, FacetItem, Annotation, AnnotationSAXHandler
# Default CSE XML created by google
CSE_XML = b"""<CustomSearchEngine id="c12345-r678" creator="creatorid" keywords="" language="en" domain="www.google.com" safesearch="true" encoding="utf-8">
<Title>AgilityNerd Site Search</Title>
<Context>
<BackgroundLabels>
<Label name="_cse_c12345-r678" mode="FILTER"/>
<Label name="_cse_exclude_c12345-r678" mode="ELIMINATE"/>
<Label name="blogs" mode="BOOST" weight="0.8"/>
</BackgroundLabels>
</Context>
<LookAndFeel code="2" resultsurl="http://agilitynerd.com/blog/googlesearch.index" adsposition="11" googlebranding="watermark" searchboxsize="31" resultswidth="500" element_layout="1" theme="1" custom_theme="true" text_font="Arial, sans-serif" url_length="full" element_branding="show" enable_cse_thumbnail="true" promotion_url_length="full" ads_layout="1">
<Logo/>
<Colors url="#008000" background="#FFFFFF" border="#336699" title="#0000FF" text="#000000" visited="#663399" light="0000FF" logobg="336699" title_hover="#0000CC" title_active="#0000CC"/>
<Promotions title_color="#0000CC" title_visited_color="#0000CC" url_color="#008000" background_color="#FFFFFF" border_color="#336699" snippet_color="#000000" title_hover_color="#0000CC" title_active_color="#0000CC"/>
<SearchControls input_border_color="#D9D9D9" button_border_color="#666666" button_background_color="#CECECE" tab_border_color="#E9E9E9" tab_background_color="#E9E9E9" tab_selected_border_color="#FF9900" tab_selected_background_color="#FFFFFF"/>
<Results border_color="#FFFFFF" border_hover_color="#FFFFFF" background_color="#FFFFFF" background_hover_color="#FFFFFF" ads_background_color="#FDF6E5" ads_border_color="#FDF6E5"/>
</LookAndFeel>
<AdSense>
<Client id="partner-pub-id"/>
</AdSense>
<EnterpriseAccount/>
<ImageSearchSettings enable="true"/>
<autocomplete_settings/>
<cse_advance_settings enable_speech="true"/>
</CustomSearchEngine>"""
# Semi customized
FACETED_XML = b"""<GoogleCustomizations version="1.0">
<CustomSearchEngine id="csekeystring" version="1.0" creator="anothercreatorid" encoding="utf-8" volunteers="false" keywords="" visible="true" top_refinements="4">
<Title>AgilityNerd Dog Agility Search</Title>
<Description>Search for Dog Agility topics, clubs, trainers, facilities, organizations and stores</Description>
<Context refinementsTitle="Refine results for $q:">
<Facet>
<FacetItem title="Blogs">
<Label name="blog" mode="BOOST"/>
</FacetItem>
<FacetItem title="Clubs">
<Label name="club" mode="FILTER"/>
</FacetItem>
<FacetItem title="Equipment">
<Label name="equipment" mode="FILTER"/>
</FacetItem>
<FacetItem title="Forums">
<Label name="forum" mode="FILTER"/>
</FacetItem>
</Facet>
<Facet>
<FacetItem title="General">
<Label name="general" mode="FILTER"/>
</FacetItem>
<FacetItem title="Organizations">
<Label name="organization" mode="FILTER"/>
</FacetItem>
<FacetItem title="Services">
<Label name="service" mode="FILTER"/>
</FacetItem>
<FacetItem title="Stores">
<Label name="store" mode="FILTER"/>
</FacetItem>
</Facet>
<Facet>
<FacetItem title="Training">
<Label name="training" mode="FILTER"/>
</FacetItem>
<FacetItem title="Training Facilities">
<Label name="facility" mode="FILTER"/>
</FacetItem>
<FacetItem title="Video">
<Label name="video" mode="FILTER"/>
</FacetItem>
<FacetItem title="Ring Rental">
<Label name="rental" mode="FILTER"/>
</FacetItem>
</Facet>
<BackgroundLabels>
<Label name="_cse_csekeystring" mode="FILTER" weight="1.0"/>
<Label name="_cse_exclude_csekeystring" mode="ELIMINATE" weight="1.0"/>
</BackgroundLabels>
</Context>
<LookAndFeel>
<Logo url="http://data.agilitynerd.com/images/AgilityNerd_SideBySide.gif" destination="http://agilitynerd.com" height="51"/>
</LookAndFeel>
<SubscribedLinks/>
<AdSense/>
<EnterpriseAccount/>
</CustomSearchEngine>
<Include type="Annotations" href="http://googility.com/googility_annotations.xml"/>
</GoogleCustomizations>"""
ANNOTATION_XML = b"""<Annotations start="0" num="7" total="7">
<Annotation about="tech.agilitynerd.com/author/" timestamp="0x0004d956807a35fd" href="Chx0ZWNoLmFnaWxpdHluZXJkLmNvbS9hdXRob3IvEP3r6IPoqrYC">
<Label name="_cse_exclude_keystring" />
<AdditionalData attribute="original_url" value="tech.agilitynerd.com/author/" />
</Annotation>
<Annotation about="tech.agilitynerd.com/archives.html" timestamp="0x0004d9567c1f41da" href="CiJ0ZWNoLmFnaWxpdHluZXJkLmNvbS9hcmNoaXZlcy5odG1sENqD_eDnqrYC">
<Label name="_cse_exclude_keystring" />
<AdditionalData attribute="original_url" value="tech.agilitynerd.com/archives.html" />
</Annotation>
<Annotation about="tech.agilitynerd.com/category/" timestamp="0x0004d829da16938d" href="Ch50ZWNoLmFnaWxpdHluZXJkLmNvbS9jYXRlZ29yeS8Qjafa0J2FtgI">
<Label name="_cse_exclude_keystring" />
<AdditionalData attribute="original_url" value="tech.agilitynerd.com/category/" />
</Annotation>
<Annotation about="tech.agilitynerd.com/tag/" timestamp="0x0004d829d8c7d8fd" href="Chl0ZWNoLmFnaWxpdHluZXJkLmNvbS90YWcvEP2xn8adhbYC">
<Label name="_cse_exclude_keystring" />
<AdditionalData attribute="original_url" value="tech.agilitynerd.com/tag/" />
</Annotation>
<Annotation about="tech.agilitynerd.com/*" score="1" timestamp="0x0004d825f3ed22b2" href="ChZ0ZWNoLmFnaWxpdHluZXJkLmNvbS8qELLFtJ_fhLYC">
<Label name="_cse_adifferentkeystring" />
<AdditionalData attribute="original_url" value="tech.agilitynerd.com" />
</Annotation>
<Annotation about="tech.agilitynerd.com/*">
<Label name="_cse_keystring" />
<AdditionalData attribute="original_url" value="tech.agilitynerd.com/*" />
<Comment>here's a comment</Comment>
</Annotation>
</Annotations>"""
class TestCustomSearchEngine(TestCase):
    """Smoke test: background labels can be attached to a saved engine."""

    def test_adding_background_labels(self):
        engine = CustomSearchEngine(gid='c12345-r678',
                                    title='AgilityNerd Site Search')
        engine.save()
        # Same save/add sequence as before: a FILTER label then an
        # ELIMINATE label, each persisted before being attached.
        for label_name, label_mode in (("keystring", Label.MODE.filter),
                                       ("exclude_keystring", Label.MODE.eliminate)):
            label = Label(name=label_name, mode=label_mode)
            label.save()
            engine.background_labels.add(label)
class TestImportCustomSearchEngine(TestCase):
    """Creation, gid uniqueness, and construction from Google CSE XML."""

    def _make_cse(self):
        # One-line factory for the engine used by every test in this class.
        return CustomSearchEngine(gid='c12345-r678',
                                  title='AgilityNerd Site Search')

    def test_insert(self):
        self._make_cse().save()

    def test_gid_is_unique(self):
        self._make_cse().save()
        duplicate = self._make_cse()
        self.assertRaises(IntegrityError, duplicate.save)

    def test_gid_creator_populated_from_google_xml_as_string(self):
        cse = CustomSearchEngine.from_string(CSE_XML)
        self.assertEqual("c12345-r678", cse.gid)
        self.assertEqual("creatorid", cse.creator)
def _extractPath(xml, path):
    """Parse *xml* and return the list of nodes matching the XPath *path*."""
    return ET.fromstring(xml).xpath(path)
def _extractPathElementText(xml, path):
    """Return the text of the first node matching *path*, or None if absent."""
    nodes = _extractPath(xml, path)
    return nodes[0].text if nodes else None
def _extractPathAsString(xml, path):
    """Serialize the first node matching *path* to a stripped unicode string.

    Returns '' when nothing matches.
    """
    nodes = _extractPath(xml, path)
    if not nodes:
        return ''
    return ET.tostring(nodes[0], encoding="unicode").strip()
class TestCSEUpdateXML(TestCase):
    """Saving a CustomSearchEngine regenerates ``output_xml`` from
    ``input_xml`` plus model-level overrides (gid, title, description,
    facet items and annotation Include elements).

    Fix in this revision: the class previously defined
    ``test_output_xml_has_new_facet_labels`` twice; the first definition was
    silently shadowed by the second and never ran. The first is renamed below
    so both execute.
    """

    # Minimal CSE document (no Title/Description) shared by the tests that
    # exercise element creation; was previously triplicated inline.
    BARE_CSE_XML = """<CustomSearchEngine id="c12345-r678" keywords="" language="en" encoding="ISO-8859-1" domain="www.google.com" safesearch="true"><Context/></CustomSearchEngine>"""

    def test_output_xml_has_new_gid_when_no_changes_to_instance(self):
        cse = CustomSearchEngine(gid="c12345-r999",
                                 input_xml=CSE_XML)
        cse.save()
        self.assertEqual('c12345-r999',
                         _extractPath(cse.output_xml,
                                      "/GoogleCustomizations/CustomSearchEngine")[0].attrib['id'])
        self.assertEqual('', cse.title)  # no title set so leave XML alone
        self.assertEqual("AgilityNerd Site Search",
                         _extractPathElementText(cse.output_xml, "/GoogleCustomizations/CustomSearchEngine/Title"))

    def test_output_xml_has_new_title_when_title_is_changed(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 title="""Here's a new title in need of escaping: &<>""",
                                 input_xml=CSE_XML)
        cse.save()
        self.assertEqual(cse.title,
                         _extractPathElementText(cse.output_xml,
                                                 "/GoogleCustomizations/CustomSearchEngine/Title"))

    def test_output_xml_has_new_title_element_when_there_is_no_title_element(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 title="""Here's a new title in need of escaping: &<>""",
                                 input_xml=self.BARE_CSE_XML)
        cse.save()
        self.assertEqual(cse.title,
                         _extractPathElementText(cse.output_xml,
                                                 "/GoogleCustomizations/CustomSearchEngine/Title"))

    def test_output_xml_has_new_description_when_description_is_changed(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 description="""Here's a new description in need of escaping: &<>""",
                                 input_xml=CSE_XML)
        cse.save()
        self.assertEqual(cse.description,
                         _extractPathElementText(cse.output_xml,
                                                 "/GoogleCustomizations/CustomSearchEngine/Description"))

    def test_output_xml_has_new_description_element_when_there_is_no_description_element(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 description="""Here's a new description in need of escaping: &<>""",
                                 input_xml=self.BARE_CSE_XML)
        cse.save()
        self.assertEqual(cse.description,
                         _extractPathElementText(cse.output_xml,
                                                 "/GoogleCustomizations/CustomSearchEngine/Description"))

    def test_output_xml_has_new_title_and_description_when_neither_exist(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 title="""Here's a new title in need of escaping: &<>""",
                                 description="""Here's a new description in need of escaping: &<>""",
                                 input_xml=self.BARE_CSE_XML)
        cse.save()
        self.assertEqual(cse.title,
                         _extractPathElementText(cse.output_xml,
                                                 "/GoogleCustomizations/CustomSearchEngine/Title"))
        self.assertEqual(cse.description,
                         _extractPathElementText(cse.output_xml,
                                                 "/GoogleCustomizations/CustomSearchEngine/Description"))

    def test_output_xml_has_annotation_include(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 input_xml=FACETED_XML)
        cse.save()
        self.assertEqual(1,
                         len(_extractPath(cse.output_xml,
                                          "/GoogleCustomizations/Include")))
        self.assertEqual('<Include type="Annotations" href="//example.com/annotations/c12345-r678.0.xml"/>',
                         _extractPathAsString(cse.output_xml,
                                              "/GoogleCustomizations/Include"))

    def test_output_xml_has_annotation_includes(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 input_xml=FACETED_XML)
        # Force pagination: enough annotations for two Include files.
        cse.annotation_count = lambda: 2000
        cse.save()
        self.assertEqual(2,
                         len(_extractPath(cse.output_xml,
                                          "/GoogleCustomizations/Include")))
        self.assertEqual('<Include type="Annotations" href="//example.com/annotations/c12345-r678.0.xml"/>',
                         _extractPathAsString(cse.output_xml,
                                              "/GoogleCustomizations/Include[1]"))
        self.assertEqual('<Include type="Annotations" href="//example.com/annotations/c12345-r678.1.xml"/>',
                         _extractPathAsString(cse.output_xml,
                                              "/GoogleCustomizations/Include[2]"))

    # Renamed from test_output_xml_has_new_facet_labels: the duplicate name
    # meant this test was shadowed by the later definition and never ran.
    def test_output_xml_facet_item_matches_first_added_label(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 input_xml=FACETED_XML)
        cse.save()
        label = Label(name="Dogs",
                      description="Dog refinement",
                      mode=Label.MODE.filter,
                      weight=0.7)
        label.save()
        facet = FacetItem(title="Dogs",
                          label=label,
                          cse=cse)
        facet.save()
        cse.facetitem_set.add(facet)
        label = Label(name="Cats",
                      description="Cat refinement",
                      mode=Label.MODE.filter,
                      weight=0.7)
        label.save()
        facet = FacetItem(title="Cats",
                          label=label,
                          cse=cse)
        facet.save()
        cse.facetitem_set.add(facet)
        cse.save()
        self.assertEqual(1,
                         len(_extractPath(cse.output_xml,
                                          ".//Context/Facet")))
        self.assertEqual(cse.facetitem_set.all()[0].xml(),
                         _extractPathAsString(cse.output_xml,
                                              ".//Context/Facet/FacetItem"))

    def test_output_xml_has_new_facet_labels(self):
        cse = CustomSearchEngine(gid="c12345-r678",
                                 input_xml=FACETED_XML)
        cse.save()
        label = Label(name="Dogs",
                      description="Dog refinement",
                      mode=Label.MODE.filter,
                      weight=0.7)
        label.save()
        facet = FacetItem(title="Dogs",
                          label=label,
                          cse=cse)
        facet.save()
        cse.facetitem_set.add(facet)
        label = Label(name="Cats",
                      description="Cat refinement",
                      mode=Label.MODE.filter,
                      weight=0.7)
        label.save()
        facet = FacetItem(title="Cats",
                          label=label,
                          cse=cse)
        facet.save()
        cse.facetitem_set.add(facet)
        cse.save()
        self.assertEqual(1,
                         len(_extractPath(cse.output_xml,
                                          ".//Context/Facet")),
                         "should be only one Facet")
        self.assertEqual(2,
                         len(_extractPath(cse.output_xml,
                                          ".//Context/Facet/FacetItem")),
                         "should be two FacetItems in Facet")
        self.assertEqual(cse.facetitem_set.all()[0].xml(),
                         _extractPathAsString(cse.output_xml,
                                              ".//Context/Facet/FacetItem[1]"),
                         "first FacetItem should be first")
        self.assertEqual(cse.facetitem_set.all()[1].xml(),
                         _extractPathAsString(cse.output_xml,
                                              ".//Context/Facet/FacetItem[2]"))

    def test_output_xml_num_facet_items_per_facet(self):
        cse = CustomSearchEngine.from_string(FACETED_XML)
        cse.save()
        # 12 parsed FacetItems at 2 per Facet should regroup into 6 Facets.
        with override_settings(GCSE_CONFIG={'NUM_FACET_ITEMS_PER_FACET': 2,
                                            'NUM_ANNOTATIONS_PER_FILE': 1000}):
            cse.save()
            self.assertEqual(6,
                             len(_extractPath(cse.output_xml,
                                              ".//Context/Facet")))
            for facet_index in range(1, 7):
                self.assertEqual(2,
                                 len(_extractPath(cse.output_xml,
                                                  ".//Context/Facet[%d]/FacetItem" % facet_index)))
            self.assertEqual(cse.facetitem_set.all()[0].xml(),
                             _extractPathAsString(cse.output_xml,
                                                  ".//Context/Facet[1]/FacetItem[1]"))
            self.assertEqual(cse.facetitem_set.all()[1].xml(),
                             _extractPathAsString(cse.output_xml,
                                                  ".//Context/Facet[1]/FacetItem[2]"))

    def test_missing_google_customizations(self):
        xml = "<CustomSearchEngine/>"
        doc = ET.fromstring(xml)
        new_doc = CustomSearchEngine._add_google_customizations(doc)
        self.assertEqual("GoogleCustomizations", new_doc.tag)

    def test_has_google_customizations(self):
        xml = "<GoogleCustomizations/>"
        doc = ET.fromstring(xml)
        new_doc = CustomSearchEngine._add_google_customizations(doc)
        self.assertEqual("GoogleCustomizations", new_doc.tag)
class TestCSESAXHandler(TestCase):
    """SAX parsing of CustomSearchEngine fields out of Google CSE XML."""

    def setUp(self):
        # Parse the simple fixture; individual tests re-parse FACETED_XML
        # when they need facets or a description.
        self.handler = CSESAXHandler()
        self.cse, self.annotation_urls = self.handler.parseString(CSE_XML)

    def test_gid_is_parsed_from_xml(self):
        self.assertEqual('c12345-r678', self.cse.gid)

    def test_title_is_parsed_from_xml(self):
        self.assertEqual('AgilityNerd Site Search', self.cse.title)

    def test_empty_description_is_parsed_from_xml(self):
        # CSE_XML has no Description element, so the model gets ''.
        self.assertEqual('', self.cse.description)

    def test_description_is_parsed_from_xml(self):
        self.cse, annotation_urls = self.handler.parseString(FACETED_XML)
        self.assertEqual(u'Search for Dog Agility topics, clubs, trainers, facilities, organizations and stores',
                         self.cse.description)

    def test_labels_are_parsed_from_xml(self):
        cse = self.cse
        self.assertEqual(0, cse.facetitem_set.count())
        self.assertEqual(3, cse.background_labels.count())
        labels = cse.background_labels.all()
        label_names = [x.name for x in labels]
        self.assertTrue("_cse_c12345-r678" in label_names)
        self.assertTrue("_cse_exclude_c12345-r678" in label_names)
        label_modes = [x.mode for x in labels]
        self.assertTrue(Label.MODE.filter in label_modes)
        self.assertTrue(Label.MODE.eliminate in label_modes)
        self.assertTrue(Label.MODE.boost in label_modes)
        # Positional checks below assume labels come back in document order
        # (filter, eliminate, boost) -- only the boost label has a weight.
        self.assertEqual(True, labels[0].background)
        self.assertEqual(True, labels[1].background)
        self.assertEqual(True, labels[2].background)
        self.assertEqual(None, labels[0].weight)
        self.assertEqual(None, labels[1].weight)
        self.assertEqual(0.8, labels[2].weight)

    def test_labels_are_parsed_from_facets_in_xml(self):
        self.cse, annotation_urls = self.handler.parseString(FACETED_XML)
        cse = self.cse
        self.assertEqual(12, cse.facetitem_set.count())
        self.assertEqual(12, len(cse.facet_item_labels_counts()))
        labels = cse.facet_item_labels()
        label_names = set([x.name for x in labels])
        self.assertEqual(set(["blog", "club", "equipment", "forum", "general", "organization", "service", "store", "training", "facility", "video", "rental"]),
                         label_names)
        label_modes = [x.mode for x in labels]
        self.assertTrue(Label.MODE.filter in label_modes)
        self.assertFalse(Label.MODE.eliminate in label_modes)
        self.assertTrue(Label.MODE.boost in label_modes)
        # Background labels stay separate from facet labels.
        self.assertEqual(set(["_cse_csekeystring", "_cse_exclude_csekeystring"]),
                         set([x.name for x in cse.background_labels.all()]))

    def test_input_xml_is_parsed_from_xml(self):
        # The original bytes are preserved verbatim on the model.
        expected = CSE_XML
        self.assertEqual(expected,
                         self.cse.input_xml)

    def test_facet_items_are_parsed_from_xml(self):
        cse, annotation_urls = self.handler.parseString(FACETED_XML)
        self.assertEqual(12, cse.facetitem_set.count())

    def test_no_annotation_includes_are_parsed_from_xml(self):
        cse, annotation_urls = self.handler.parseString(CSE_XML)
        self.assertEqual(0, len(annotation_urls))

    def test_annotation_includes_are_parsed_from_xml(self):
        # FACETED_XML carries one <Include type="Annotations"> element.
        cse, annotation_urls = self.handler.parseString(FACETED_XML)
        self.assertEqual(1, len(annotation_urls))
class TestLabel(TestCase):
    """Label.get_mode lookup and Label.xml rendering of the weight attribute."""

    def test_get_mode(self):
        for mode_text, expected_mode in (("ELIMINATE", Label.MODE.eliminate),
                                         ("FILTER", Label.MODE.filter),
                                         ("BOOST", Label.MODE.boost)):
            self.assertEqual(expected_mode, Label.get_mode(mode_text))

    def test_xml_weight_not_displayed_when_no_weight_specified(self):
        unweighted = Label(name="blog")
        self.assertEqual('<Label name="blog" mode="FILTER"/>',
                         unweighted.xml())

    def test_xml_weight_not_displayed_when_weight_specified_and_not_requested(self):
        weighted = Label(name="blog", weight=0.5)
        # complete=False suppresses the weight attribute even when set.
        self.assertEqual('<Label name="blog" mode="FILTER"/>',
                         weighted.xml(complete=False))

    def test_xml_weight_is_displayed(self):
        weighted = Label(name="blog", weight=0.4)
        self.assertEqual('<Label name="blog" mode="FILTER" weight="0.4"/>',
                         weighted.xml())
class TestCSEAddingAnnotations(TestCase):
    """Only STATUS.active annotations whose labels match one of the engine's
    background labels are counted as belonging to the CSE."""

    def setUp(self):
        self.cse = CustomSearchEngine.from_string(FACETED_XML)

    def test_annotation_with_STATUS_ACTIVE_without_labels_not_in_cse(self):
        # Active but unlabeled: no label linking it to this engine.
        Annotation.objects.create(comment="Active Annotation",
                                  status=Annotation.STATUS.active)
        self.assertEqual(0, self.cse.annotation_count())

    def test_annotation_with_STATUS_ACTIVE_without_matching_label_not_in_cse(self):
        # Active with a label, but not one of the CSE's background labels.
        annotation = Annotation.objects.create(comment="Active Annotation",
                                               status=Annotation.STATUS.active)
        label = Label.objects.create()
        annotation.labels.add(label)
        annotation.save()
        self.assertEqual(0, self.cse.annotation_count())

    def test_annotation_with_matching_label_and_STATUS_SUBMITTED_not_in_cse(self):
        annotation = Annotation.objects.create(comment="Active Annotation",
                                               status=Annotation.STATUS.submitted)
        annotation.labels.add(self.cse.background_labels.all()[0])
        annotation.save()
        self.assertEqual(0, self.cse.annotation_count())

    def test_annotation_with_matching_label_and_STATUS_DELETED_not_in_cse(self):
        annotation = Annotation.objects.create(comment="Active Annotation",
                                               status=Annotation.STATUS.deleted)
        annotation.labels.add(self.cse.background_labels.all()[0])
        annotation.save()
        self.assertEqual(0, self.cse.annotation_count())

    def test_annotation_with_matching_label_and_STATUS_ACTIVE_in_cse(self):
        # Matching label AND active status: the only combination included.
        annotation = Annotation.objects.create(comment="Active Annotation",
                                               status=Annotation.STATUS.active)
        annotation.labels.add(self.cse.background_labels.all()[0])
        annotation.save()
        self.assertEqual(1, self.cse.annotation_count())
        self.assertEqual(self.cse.annotations()[0],
                         annotation)
# TODO move to googility
# class TestCSEAddingPlaces(TestCase):
# def setUp(self):
# self.cse = CustomSearchEngine.from_string(FACETED_XML)
# def test_place_with_STATUS_ACTIVE_without_labels_not_in_cse(self):
# Place.objects.create(comment="Active Place",
# status=Annotation.STATUS.active)
# self.assertEqual(0, self.cse.annotation_count())
# def test_place_with_STATUS_ACTIVE_without_matching_label_not_in_cse(self):
# place = Place.objects.create(comment="Active Place",
# status=Annotation.STATUS.active)
# label = Label.objects.create()
# place.labels.add(label)
# place.save()
# self.assertEqual(0, self.cse.annotation_count())
# def test_place_with_matching_label_and_STATUS_SUBMITTED_not_in_cse(self):
# place = Place.objects.create(comment="Active Place",
# status=Annotation.STATUS.submitted)
# place.labels.add(self.cse.background_labels.all()[0])
# place.save()
# self.assertEqual(0, self.cse.annotation_count())
# def test_place_with_matching_label_and_STATUS_DELETED_not_in_cse(self):
# place = Place.objects.create(comment="Active Place",
# status=Annotation.STATUS.deleted)
# place.labels.add(self.cse.background_labels.all()[0])
# place.save()
# self.assertEqual(0, self.cse.annotation_count())
# def test_place_with_matching_label_and_STATUS_ACTIVE_in_cse(self):
# place = Place.objects.create(comment="Active Place",
# status=Annotation.STATUS.active)
# place.labels.add(self.cse.background_labels.all()[0])
# place.save()
# self.assertEqual(1, self.cse.annotation_count())
# self.assertEqual(self.cse.annotations()[0],
# place)
class TestAnnotationManager(TestCase):
    """Manager queryset helpers (active/submitted/deleted) filter by status."""

    def test_manager_annotations(self):
        active = Annotation.objects.create(comment="Active Annotation",
                                           status=Annotation.STATUS.active)
        submitted = Annotation.objects.create(comment="Submitted Annotation",
                                              status=Annotation.STATUS.submitted)
        deleted = Annotation.objects.create(comment="Deleted Annotation",
                                            status=Annotation.STATUS.deleted)
        # Each status-specific queryset contains exactly its own record.
        self.assertEqual(1, Annotation.objects.active().count())
        self.assertEqual(active, Annotation.objects.active().all()[0])
        self.assertEqual(1, Annotation.objects.submitted().count())
        self.assertEqual(submitted, Annotation.objects.submitted().all()[0])
        self.assertEqual(1, Annotation.objects.deleted().count())
        self.assertEqual(deleted, Annotation.objects.deleted().all()[0])
# def test_manager_places(self):
# active = Place.objects.create(comment="Active Place",
# status=Annotation.STATUS.active)
# submitted = Place.objects.create(comment="Submitted Place",
# status=Annotation.STATUS.submitted)
# deleted = Place.objects.create(comment="Deleted Place",
# status=Annotation.STATUS.deleted)
# self.assertEqual(1, Annotation.objects.active().count())
# self.assertEqual(active, Annotation.objects.active().all()[0])
# self.assertEqual(1, Annotation.objects.submitted().count())
# self.assertEqual(submitted, Annotation.objects.submitted().all()[0])
# self.assertEqual(1, Annotation.objects.deleted().count())
# self.assertEqual(deleted, Annotation.objects.deleted().all()[0])
class TestAnnotationParsing(TestCase):
    """Parsing Annotation instances from <Annotations> XML, via both the
    Annotation.from_string classmethod and the SAX handler directly."""

    def setUp(self):
        self.handler = AnnotationSAXHandler()

    def test_empty_annotations_produces_no_annotations(self):
        self.assertEqual([], self.handler.parseString('<Annotations></Annotations>'))

    def test_parse_annotations_from_string(self):
        annotations = Annotation.from_string(ANNOTATION_XML)
        self._validate(annotations)

    def test_parse_annotations_using_sax_handler(self):
        annotations = self.handler.parseString(ANNOTATION_XML)
        self._validate(annotations)

    def _validate(self, annotations):
        # Shared assertions; indexing assumes document order of ANNOTATION_XML.
        self.assertEqual(6, len(annotations))
        a0 = annotations[0]
        self.assertEqual("tech.agilitynerd.com/author/", a0.about)
        self.assertEqual("tech.agilitynerd.com/author/", a0.original_url)
        self.assertEqual("", a0.comment)
        self.assertEqual("_cse_exclude_keystring", a0.labels.all()[0].name)
        a1 = annotations[1]
        self.assertEqual("tech.agilitynerd.com/archives.html", a1.about)
        self.assertEqual("tech.agilitynerd.com/archives.html", a1.original_url)
        self.assertEqual("", a1.comment)
        self.assertEqual("_cse_exclude_keystring", a1.labels.all()[0].name)
        a2 = annotations[2]
        self.assertEqual("tech.agilitynerd.com/category/", a2.about)
        self.assertEqual("tech.agilitynerd.com/category/", a2.original_url)
        self.assertEqual("", a2.comment)
        self.assertEqual("_cse_exclude_keystring", a2.labels.all()[0].name)
        a3 = annotations[3]
        self.assertEqual("tech.agilitynerd.com/tag/", a3.about)
        self.assertEqual("tech.agilitynerd.com/tag/", a3.original_url)
        self.assertEqual("", a3.comment)
        self.assertEqual("_cse_exclude_keystring", a3.labels.all()[0].name)
        a4 = annotations[4]
        self.assertEqual("tech.agilitynerd.com/*", a4.about)
        # strip trailing asterisk
        self.assertEqual("tech.agilitynerd.com", a4.original_url)
        self.assertEqual("", a4.comment)
        self.assertEqual("_cse_adifferentkeystring", a4.labels.all()[0].name)
        a5 = annotations[5]
        self.assertEqual("tech.agilitynerd.com/*", a5.about)
        # strip trailing asterisk
        self.assertEqual("tech.agilitynerd.com/", a5.original_url)
        self.assertEqual("here's a comment", a5.comment)
        self.assertEqual(Annotation.STATUS.active, a5.status)
        self.assertEqual("_cse_keystring", a5.labels.all()[0].name)
class AnnotationSAXHandlerTests(TestCase):
    """Parser tolerance tests; the fixture deliberately contains a raw,
    unescaped '&' inside a Comment element."""

    def testParseWithAmpersand(self):
        xml = '''<Annotations file="./clubsxml">
<Annotation about="www.luckydogagility.com/*">
<Label name="_cse_kueofys2mdy" />
<Label name="facility" />
<AdditionalData attribute="original_url" value="http://www.luckydogagility.com/" />
<Comment>Lucky Dog & Friends Agility</Comment>
</Annotation>
<Annotation about="agilitynerd.com/blog/*">
<Label name="_cse_kueofys2mdy" />
<Label name="blog" />
<AdditionalData attribute="original_url" value="http://agilitynerd.com/blog/" />
<Comment>AgilityNerd Dog Agility Blog</Comment>
</Annotation>
</Annotations>'''
        curHandler = AnnotationSAXHandler()
        annotations = curHandler.parseString(xml)
        self.assertEqual(len(annotations), 2)
        # is ampersand no longer encoded?
        annotation = annotations[0]
        self.assertEqual(annotation.comment, 'Lucky Dog & Friends Agility')
        self.assertEqual(annotation.original_url, 'http://www.luckydogagility.com/')
        self.assertEqual(annotation.about, 'www.luckydogagility.com/*')
        annotation = annotations[1]
        self.assertEqual(annotation.comment, 'AgilityNerd Dog Agility Blog')
        self.assertEqual(annotation.original_url, 'http://agilitynerd.com/blog/')
        self.assertEqual(annotation.about, 'agilitynerd.com/blog/*')
class AnnotationsLabelsLinks(TestCase):
    """Rendering an annotation's labels as HTML anchor links."""

    def setUp(self):
        active = Annotation.objects.create(comment="Active Annotation",
                                           status=Annotation.STATUS.active)
        label = Label.objects.create(name="Label & Name",
                                     background=False)
        active.labels.add(label)
        label = Label.objects.create(name="Background Label",
                                     background=True)
        active.labels.add(label)
        self.annotation = active

    def test_labels_as_links_shows_all_labels(self):
        # NOTE(review): the /labels/1/ and /labels/2/ hrefs assume a fresh
        # test database assigning sequential primary keys in creation order.
        self.assertEqual('<a class="label-link" href="/labels/2/">Background Label</a><a class="label-link" href="/labels/1/">Label & Name</a>',
                         self.annotation.labels_as_links())

    def test_labels_as_links_hides_background_labels(self):
        self.assertEqual('<a class="label-link" href="/labels/1/">Label & Name</a>',
                         self.annotation.labels_as_links(include_background_labels=False))
class AnnotationsLabels(TestCase):
    """Annotation.guess_google_url: converting plain URLs into CSE patterns."""

    def test_guess_google_url_for_a_single_page(self):
        guessed = Annotation.guess_google_url("http://example.com/foo.html")
        self.assertEqual('example.com/foo.html', guessed)

    def test_guess_google_url_for_a_terminated_path(self):
        guessed = Annotation.guess_google_url("http://example.com/foo/")
        self.assertEqual('example.com/foo/*', guessed)

    def test_guess_google_url_for_a_nonterminated_path(self):
        guessed = Annotation.guess_google_url("http://example.com/foo")
        self.assertEqual('example.com/foo/*', guessed)
class AnnotationsAlphaList(TestCase):
    """Annotation.alpha_list: per-letter navigation state (A-Z then 0-9)."""

    def setUp(self):
        # One entry per character A-Z0-9, all initially disabled.
        self.expected = [{'i': x, 'style': 'disabled'} for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789']

    def test_no_annotations_no_active_letter(self):
        results = Annotation.alpha_list()
        self.assertEqual(self.expected, results)

    def test_two_annotations_two_active_letters(self):
        f = Annotation.objects.create(comment="Fun with Python")
        five = Annotation.objects.create(comment="5 Python Anti-Patterns")
        results = Annotation.alpha_list()
        # index 5 is 'F' ("Fun..."); index -5 is '5' ("5 Python...").
        self.expected[5]['style'] = 'active'
        self.expected[-5]['style'] = 'active'
        self.assertEqual(self.expected, results)

    def test_two_annotations_two_active_letters_one_inactive_selected(self):
        f = Annotation.objects.create(comment="Fun with Python")
        five = Annotation.objects.create(comment="5 Python Anti-Patterns")
        results = Annotation.alpha_list(selection="B")
        # index 1 is 'B': selected even though no annotation starts with B.
        self.expected[1]['style'] = 'selected'
        self.expected[5]['style'] = 'active'
        self.expected[-5]['style'] = 'active'
        self.assertEqual(self.expected, results)
| |
"""Identify program versions used for analysis, reporting in structured table.
Catalogs the full list of programs used in analysis, enabling reproduction of
results and tracking of provenance in output files.
"""
import os
import contextlib
import subprocess
import sys
import yaml
import toolz as tz
from bcbio import utils
from bcbio.pipeline import config_utils, version
from bcbio.pipeline import datadict as dd
from bcbio.log import logger
# Command line programs whose version can be read by running them.
# Each entry: "cmd" (executable), optional "name" override for reporting,
# optional "args" for the version invocation, and either "stdout_flag" or
# "paren_flag" marking which output line/segment holds the version string.
# "has_cl_version": False marks tools with no queryable version.
_cl_progs = [{"cmd": "bamtofastq", "name": "biobambam",
              "args": "--version", "stdout_flag": "This is biobambam version"},
             {"cmd": "bamtools", "args": "--version", "stdout_flag": "bamtools"},
             {"cmd": "bcftools", "stdout_flag": "Version:"},
             {"cmd": "bedtools", "args": "--version", "stdout_flag": "bedtools"},
             {"cmd": "bowtie2", "args": "--version", "stdout_flag": "bowtie2-align version"},
             {"cmd": "bwa", "stdout_flag": "Version:"},
             {"cmd": "chanjo"},
             {"cmd": "cufflinks", "stdout_flag": "cufflinks"},
             {"cmd": "cutadapt", "args": "--version"},
             {"cmd": "fastqc", "args": "--version", "stdout_flag": "FastQC"},
             {"cmd": "freebayes", "stdout_flag": "version:"},
             {"cmd": "gemini", "args": "--version", "stdout_flag": "gemini "},
             {"cmd": "novosort", "paren_flag": "novosort"},
             {"cmd": "novoalign", "stdout_flag": "Novoalign"},
             {"cmd": "samtools", "stdout_flag": "Version:"},
             {"cmd": "sambamba", "stdout_flag": "sambamba"},
             {"cmd": "qualimap", "args": "-h", "stdout_flag": "QualiMap"},
             {"cmd": "vcflib", "has_cl_version": False},
             {"cmd": "featurecounts", "args": "-v", "stdout_flag": "featureCounts"}]
def _broad_versioner(type):
def get_version(config):
from bcbio import broad
try:
runner = broad.runner_from_config(config)
except ValueError:
return ""
if type == "gatk":
return runner.get_gatk_version()
elif type == "picard":
return runner.get_picard_version("ViewSam")
elif type == "mutect":
try:
runner = broad.runner_from_config(config, "mutect")
except ValueError:
return ""
return runner.get_mutect_version()
else:
raise NotImplementedError(type)
return get_version
def jar_versioner(program_name, jar_name):
    """Retrieve version information based on jar file.

    The version is whatever remains of the jar's base name after stripping
    the program name, ".jar" and "-standalone" (e.g. "GenomeAnalysisTK-3.2.jar"
    style names). Returns a function of the bcbio config yielding that string,
    or "" when the program is not configured.
    """
    def get_version(config):
        try:
            pdir = config_utils.get_program(program_name, config, "dir")
        # not configured
        except ValueError:
            return ""
        jar = os.path.basename(config_utils.get_jar(jar_name, pdir))
        for to_remove in [jar_name, ".jar", "-standalone"]:
            jar = jar.replace(to_remove, "")
        if jar.startswith(("-", ".")):
            jar = jar[1:]
        # Fixed: previously `jar is ""`, an identity comparison that only
        # worked via CPython string interning (SyntaxWarning on modern Python).
        if jar == "":
            logger.warn("Unable to determine version for program '{}' from jar file {}".format(
                program_name, config_utils.get_jar(jar_name, pdir)))
        return jar
    return get_version
def java_versioner(pname, jar_name, **kwargs):
    """Build a version function that locates *jar_name* for program *pname*
    and queries it via ``java -jar``; extra kwargs (e.g. stdout_flag) are
    forwarded to the command-line version parser."""
    def get_version(config):
        try:
            jar_dir = config_utils.get_program(pname, config, "dir")
        except ValueError:
            # Program not configured: no version available.
            return ""
        jar_path = config_utils.get_jar(jar_name, jar_dir)
        kwargs.update({"cmd": "java",
                       "args": "-Xms128m -Xmx256m -jar %s" % jar_path})
        return _get_cl_version(kwargs, config)
    return get_version
# Programs whose version cannot be read from a plain command-line flag;
# each entry pairs a report name with a custom "version_fn" (jar name
# inspection, Broad runner query, or a java -jar invocation).
_alt_progs = [{"name": "bcbio_variation",
               "version_fn": jar_versioner("bcbio_variation", "bcbio.variation")},
              {"name": "gatk", "version_fn": _broad_versioner("gatk")},
              {"name": "mutect",
               "version_fn": _broad_versioner("mutect")},
              {"name": "picard", "version_fn": _broad_versioner("picard")},
              {"name": "rnaseqc",
               "version_fn": jar_versioner("rnaseqc", "RNA-SeQC")},
              {"name": "snpeff",
               "version_fn": java_versioner("snpeff", "snpEff", stdout_flag="snpEff version SnpEff")},
              {"name": "varscan",
               "version_fn": jar_versioner("varscan", "VarScan")},
              {"name": "oncofuse",
               "version_fn": jar_versioner("Oncofuse", "Oncofuse")},
              {"name": "alientrimmer",
               "version_fn": jar_versioner("AlienTrimmer", "AlienTrimmer")}
              ]
def _parse_from_stdoutflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
parts = [p for p in line[line.find(x) + len(x):].split() if p.strip()]
return parts[0].strip()
return ""
def _parse_from_parenflag(stdout, x):
for line in stdout:
if line.find(x) >= 0:
return line.split("(")[-1].split(")")[0]
return ""
def _get_cl_version(p, config):
    """Retrieve version of a single commandline program.

    p -- program description dict with key "cmd" and optional keys
    "args", "has_cl_version", "stdout_flag", "paren_flag".
    config -- configuration used to resolve the program's path.
    Returns the parsed version string, or "" when unavailable.
    """
    if not p.get("has_cl_version", True):
        # Program is known not to report a version on the command line.
        return ""
    try:
        prog = config_utils.get_program(p["cmd"], config)
    except config_utils.CmdNotFound:
        # Fall back to a command installed alongside the running Python.
        localpy_cmd = os.path.join(os.path.dirname(sys.executable), p["cmd"])
        if os.path.exists(localpy_cmd):
            prog = localpy_cmd
        else:
            return ""
    args = p.get("args", "")
    cmd = "{prog} {args}"
    # Many tools print version information to stderr; merge it into stdout.
    subp = subprocess.Popen(cmd.format(**locals()), stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            shell=True)
    # NOTE(review): on Python 3 subp.stdout yields bytes; the string-based
    # parsing below assumes text output -- confirm Python 2 usage or add
    # universal_newlines=True.
    with contextlib.closing(subp.stdout) as stdout:
        if p.get("stdout_flag"):
            v = _parse_from_stdoutflag(stdout, p["stdout_flag"])
        elif p.get("paren_flag"):
            v = _parse_from_parenflag(stdout, p["paren_flag"])
        else:
            # No flag configured: take the last non-empty line of output.
            lines = [l.strip() for l in stdout.read().split("\n") if l.strip()]
            v = lines[-1]
    # Drop a trailing period some tools append to their version string.
    if v.endswith("."):
        v = v[:-1]
    return v
def _get_brew_versions():
    """Retrieve versions of tools installed via brew.

    Returns a dict mapping tool name to its last listed version; empty
    when brew is not installed or reports nothing.
    """
    from bcbio import install
    tooldir = install.get_defaults().get("tooldir")
    # Prefer the installation-local brew when a tool directory is configured;
    # otherwise fall back to whatever `brew` is on PATH.
    brew_cmd = os.path.join(tooldir, "bin", "brew") if tooldir else "brew"
    try:
        vout = subprocess.check_output([brew_cmd, "list", "--versions"])
    except OSError:  # brew not installed/used
        vout = ""
    out = {}
    # NOTE(review): check_output returns bytes on Python 3; .split("\n")
    # below assumes text -- confirm Python 2 usage or decode first.
    for vstr in vout.split("\n"):
        if vstr.strip():
            # Each line is "<name> <version> [<version> ...]"; keep the last
            # listed version for the tool.
            parts = vstr.rstrip().split()
            name = parts[0]
            v = parts[-1]
            out[name] = v
    return out
def _get_versions(config=None):
    """Retrieve details on all programs available on the system.

    Returns a list of {"program": name, "version": version} dictionaries,
    starting with bcbio-nextgen itself.
    """
    # bcbio-nextgen is reported first, with the git revision appended
    # when one is available.
    out = [{"program": "bcbio-nextgen",
            "version": ("%s-%s" % (version.__version__, version.__git_revision__)
                        if version.__git_revision__ else version.__version__)}]
    manifest_dir = _get_manifest_dir(config)
    manifest_vs = _get_versions_manifest(manifest_dir)
    if manifest_vs:
        # A pre-built manifest is authoritative; skip live program queries.
        return out + manifest_vs
    else:
        assert config is not None, "Need configuration to retrieve from non-manifest installs"
        brew_vs = _get_brew_versions()
        # Prefer brew-reported versions; otherwise query each program directly.
        for p in _cl_progs:
            out.append({"program": p["cmd"],
                        "version": (brew_vs[p["cmd"]] if p["cmd"] in brew_vs else
                                    _get_cl_version(p, config))})
        for p in _alt_progs:
            out.append({"program": p["name"],
                        "version": (brew_vs[p["name"]] if p["name"] in brew_vs else
                                    p["version_fn"](config))})
        return out
def _get_manifest_dir(data=None):
    """
    get manifest directory from the data dictionary, falling back on alternatives
    it prefers, in order:
    1. locating it from the bcbio_system.yaml file
    2. locating it from the galaxy directory
    3. locating it from the python executable.
    it can accept either the data or config dictionary
    """
    manifest_dir = None
    if data:
        # Accept either a full data dictionary (bcbio_system nested under
        # "config") or a plain config dictionary with it at top level.
        bcbio_system = tz.get_in(["config", "bcbio_system"], data, None)
        bcbio_system = bcbio_system if bcbio_system else data.get("bcbio_system", None)
        if bcbio_system:
            sibling_dir = os.path.normpath(os.path.dirname(bcbio_system))
        else:
            sibling_dir = dd.get_galaxy_dir(data)
        if sibling_dir:
            # The manifest directory sits next to the config/galaxy directory.
            manifest_dir = os.path.normpath(os.path.join(sibling_dir, os.pardir,
                                                         "manifest"))
    if not manifest_dir or not os.path.exists(manifest_dir):
        # Final fallback: installation root derived from the Python executable.
        manifest_dir = os.path.join(config_utils.get_base_installdir(), "manifest")
    return manifest_dir
def _get_versions_manifest(manifest_dir):
    """Retrieve versions from a pre-existing manifest of installed software.

    manifest_dir -- directory containing ``<type>-packages.yaml`` manifests.

    Returns a list of {"program": name, "version": version} dictionaries
    sorted by program name, with an empty version for packages missing from
    the manifest. Returns an empty list when the manifest directory does not
    exist (previously this fell through and returned None, which broke
    callers that iterate over the result, e.g. get_version_manifest).
    """
    all_pkgs = ["htseq", "cn.mops", "vt", "platypus-variant", "gatk-framework", "samblaster"] + \
               [p.get("name", p["cmd"]) for p in _cl_progs] + [p["name"] for p in _alt_progs]
    out = []
    if os.path.exists(manifest_dir):
        for plist in ["toolplus", "brew", "python", "r", "debian", "custom"]:
            pkg_file = os.path.join(manifest_dir, "%s-packages.yaml" % plist)
            if os.path.exists(pkg_file):
                with open(pkg_file) as in_handle:
                    pkg_info = yaml.safe_load(in_handle)
                added = []
                for pkg in all_pkgs:
                    if pkg in pkg_info:
                        added.append(pkg)
                        out.append({"program": pkg, "version": pkg_info[pkg]["version"]})
                # Remove found packages so later manifests cannot duplicate them.
                for x in added:
                    all_pkgs.remove(x)
        out.sort(key=lambda x: x["program"])
        # Report remaining packages with an empty version so they still appear.
        for pkg in all_pkgs:
            out.append({"program": pkg, "version": ""})
    return out
def _get_program_file(dirs):
if dirs.get("work"):
base_dir = utils.safe_makedir(os.path.join(dirs["work"], "provenance"))
return os.path.join(base_dir, "programs.txt")
def write_versions(dirs, config=None, is_wrapper=False):
    """Write CSV file with versions used in analysis pipeline.

    dirs -- directory map; output goes to work/provenance/programs.txt.
    config -- configuration used to look up program versions.
    is_wrapper -- when True, expect the file to already exist (per the
    assertion message, created from the wrapped VM) instead of writing it.

    Returns the path of the programs file (None when printed to stdout).
    """
    out_file = _get_program_file(dirs)
    if is_wrapper:
        assert utils.file_exists(out_file), "Failed to create program versions from VM"
    elif out_file is None:
        # No work directory configured: report versions to stdout instead.
        for p in _get_versions(config):
            print("{program},{version}".format(**p))
    else:
        with open(out_file, "w") as out_handle:
            for p in _get_versions(config):
                out_handle.write("{program},{version}\n".format(**p))
    return out_file
def get_version_manifest(name, data=None, required=False):
    """Retrieve a version from the currently installed manifest.

    name -- program name to look up.
    data -- optional data/config dictionary used to locate the manifest.
    required -- when True, raise ValueError if no version is found.

    Returns the version string, or "" when not found and not required.
    """
    manifest_dir = _get_manifest_dir(data)
    manifest_vs = _get_versions_manifest(manifest_dir)
    # Guard against a missing manifest: _get_versions_manifest historically
    # returned None when the manifest directory did not exist, which made
    # this loop raise TypeError instead of falling through cleanly.
    for x in manifest_vs or []:
        if x["program"] == name:
            v = x.get("version", "")
            if v:
                return v
    if required:
        raise ValueError("Did not find %s in install manifest. Could not check version." % name)
    return ""
def add_subparser(subparsers):
    """Register the ``version`` subcommand for exporting software versions.
    """
    version_parser = subparsers.add_parser(
        "version",
        help="Export versions of used software to stdout or a file ")
    version_parser.add_argument(
        "--workdir", default=None,
        help="Directory export programs to in workdir/provenance/programs.txt")
def get_version(name, dirs=None, config=None):
    """Look up a program's cached version from the provenance CSV file.

    name -- program to look up.
    dirs -- directory map; when given, locates the file under the work dir.
    config -- configuration with resources/program_versions, used when
    dirs is not supplied.

    Raises KeyError when no non-empty version is recorded for ``name``.
    """
    if dirs:
        version_file = _get_program_file(dirs)
    else:
        version_file = config["resources"]["program_versions"]
    with open(version_file) as in_handle:
        for raw_line in in_handle:
            prog, version = raw_line.rstrip().split(",")
            if prog == name and version:
                return version
    raise KeyError("Version information not found for %s in %s" % (name, version_file))
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from abc import abstractmethod, ABCMeta
from pyspark import since, keyword_only
from pyspark.ml.wrapper import JavaParams
from pyspark.ml.param import Param, Params, TypeConverters
from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol, HasProbabilityCol, \
HasRawPredictionCol, HasFeaturesCol, HasWeightCol
from pyspark.ml.common import inherit_doc
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
__all__ = ['Evaluator', 'BinaryClassificationEvaluator', 'RegressionEvaluator',
'MulticlassClassificationEvaluator', 'MultilabelClassificationEvaluator',
'ClusteringEvaluator', 'RankingEvaluator']
@inherit_doc
class Evaluator(Params):
    """
    Base class for evaluators that compute metrics from predictions.
    .. versionadded:: 1.4.0
    """
    # NOTE(review): __metaclass__ is a Python 2 construct and has no effect
    # on Python 3, so abstractness is not actually enforced there -- confirm
    # whether this is intentional backwards compatibility.
    __metaclass__ = ABCMeta
    @abstractmethod
    def _evaluate(self, dataset):
        """
        Evaluates the output.
        :param dataset: a dataset that contains labels/observations and
            predictions
        :return: metric
        """
        raise NotImplementedError()
    @since("1.4.0")
    def evaluate(self, dataset, params=None):
        """
        Evaluates the output with optional parameters.
        :param dataset: a dataset that contains labels/observations and
            predictions
        :param params: an optional param map that overrides embedded
            params
        :return: metric
        """
        if params is None:
            params = dict()
        if isinstance(params, dict):
            if params:
                # Apply overrides on a copy so this evaluator's own embedded
                # params are left untouched.
                return self.copy(params)._evaluate(dataset)
            else:
                return self._evaluate(dataset)
        else:
            raise ValueError("Params must be a param map but got %s." % type(params))
    @since("1.5.0")
    def isLargerBetter(self):
        """
        Indicates whether the metric returned by :py:meth:`evaluate` should be maximized
        (True, default) or minimized (False).
        A given evaluator may support multiple metrics which may be maximized or minimized.
        """
        return True
@inherit_doc
class JavaEvaluator(JavaParams, Evaluator):
    """
    Base class for :py:class:`Evaluator`s that wrap Java/Scala
    implementations.
    """
    # NOTE(review): __metaclass__ has no effect on Python 3 (Python 2 relic).
    __metaclass__ = ABCMeta
    def _evaluate(self, dataset):
        """
        Evaluates the output.
        :param dataset: a dataset that contains labels/observations and predictions.
        :return: evaluation metric
        """
        # Push current Python-side params to the wrapped Java object before
        # delegating the actual computation to the JVM.
        self._transfer_params_to_java()
        return self._java_obj.evaluate(dataset._jdf)
    def isLargerBetter(self):
        # Delegate to the Java implementation, which knows each metric's
        # optimization direction.
        self._transfer_params_to_java()
        return self._java_obj.isLargerBetter()
@inherit_doc
class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol, HasWeightCol,
                                    JavaMLReadable, JavaMLWritable):
    """
    Evaluator for binary classification, which expects two input columns: rawPrediction and label.
    The rawPrediction column can be of type double (binary 0/1 prediction, or probability of label
    1) or of type vector (length-2 vector of raw predictions, scores, or label probabilities).
    >>> from pyspark.ml.linalg import Vectors
    >>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
    ...    [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
    ...
    >>> evaluator = BinaryClassificationEvaluator()
    >>> evaluator.setRawPredictionCol("raw")
    BinaryClassificationEvaluator...
    >>> evaluator.evaluate(dataset)
    0.70...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
    0.83...
    >>> bce_path = temp_path + "/bce"
    >>> evaluator.save(bce_path)
    >>> evaluator2 = BinaryClassificationEvaluator.load(bce_path)
    >>> str(evaluator2.getRawPredictionCol())
    'raw'
    >>> scoreAndLabelsAndWeight = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1], x[2]),
    ...    [(0.1, 0.0, 1.0), (0.1, 1.0, 0.9), (0.4, 0.0, 0.7), (0.6, 0.0, 0.9),
    ...     (0.6, 1.0, 1.0), (0.6, 1.0, 0.3), (0.8, 1.0, 1.0)])
    >>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["raw", "label", "weight"])
    ...
    >>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw", weightCol="weight")
    >>> evaluator.evaluate(dataset)
    0.70...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
    0.82...
    >>> evaluator.getNumBins()
    1000
    .. versionadded:: 1.4.0
    """
    # Tunable parameters; Params._dummy() is a placeholder parent replaced
    # per-instance by the Params machinery.
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation (areaUnderROC|areaUnderPR)",
                       typeConverter=TypeConverters.toString)
    numBins = Param(Params._dummy(), "numBins", "Number of bins to down-sample the curves "
                    "(ROC curve, PR curve) in area computation. If 0, no down-sampling will "
                    "occur. Must be >= 0.",
                    typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
                 metricName="areaUnderROC", weightCol=None, numBins=1000):
        """
        __init__(self, rawPredictionCol="rawPrediction", labelCol="label", \
                 metricName="areaUnderROC", weightCol=None, numBins=1000)
        """
        super(BinaryClassificationEvaluator, self).__init__()
        # Create the JVM counterpart and register defaults before applying
        # user-supplied keyword arguments.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid)
        self._setDefault(metricName="areaUnderROC", numBins=1000)
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("1.4.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("1.4.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @since("3.0.0")
    def setNumBins(self, value):
        """
        Sets the value of :py:attr:`numBins`.
        """
        return self._set(numBins=value)
    @since("3.0.0")
    def getNumBins(self):
        """
        Gets the value of numBins or its default value.
        """
        return self.getOrDefault(self.numBins)
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)
    def setRawPredictionCol(self, value):
        """
        Sets the value of :py:attr:`rawPredictionCol`.
        """
        return self._set(rawPredictionCol=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @keyword_only
    @since("1.4.0")
    def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
                  metricName="areaUnderROC", weightCol=None, numBins=1000):
        """
        setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \
                  metricName="areaUnderROC", weightCol=None, numBins=1000)
        Sets params for binary classification evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol, HasWeightCol,
                          JavaMLReadable, JavaMLWritable):
    """
    Evaluator for Regression, which expects input columns prediction, label
    and an optional weight column.
    >>> scoreAndLabels = [(-28.98343821, -27.0), (20.21491975, 21.5),
    ...   (-25.98418959, -22.0), (30.69731842, 33.0), (74.69283752, 71.0)]
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["raw", "label"])
    ...
    >>> evaluator = RegressionEvaluator()
    >>> evaluator.setPredictionCol("raw")
    RegressionEvaluator...
    >>> evaluator.evaluate(dataset)
    2.842...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "r2"})
    0.993...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "mae"})
    2.649...
    >>> re_path = temp_path + "/re"
    >>> evaluator.save(re_path)
    >>> evaluator2 = RegressionEvaluator.load(re_path)
    >>> str(evaluator2.getPredictionCol())
    'raw'
    >>> scoreAndLabelsAndWeight = [(-28.98343821, -27.0, 1.0), (20.21491975, 21.5, 0.8),
    ...   (-25.98418959, -22.0, 1.0), (30.69731842, 33.0, 0.6), (74.69283752, 71.0, 0.2)]
    >>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["raw", "label", "weight"])
    ...
    >>> evaluator = RegressionEvaluator(predictionCol="raw", weightCol="weight")
    >>> evaluator.evaluate(dataset)
    2.740...
    >>> evaluator.getThroughOrigin()
    False
    .. versionadded:: 1.4.0
    """
    # Tunable parameters; Params._dummy() is a placeholder parent replaced
    # per-instance by the Params machinery.
    metricName = Param(Params._dummy(), "metricName",
                       """metric name in evaluation - one of:
                       rmse - root mean squared error (default)
                       mse - mean squared error
                       r2 - r^2 metric
                       mae - mean absolute error
                       var - explained variance.""",
                       typeConverter=TypeConverters.toString)
    throughOrigin = Param(Params._dummy(), "throughOrigin",
                          "whether the regression is through the origin.",
                          typeConverter=TypeConverters.toBoolean)
    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="rmse", weightCol=None, throughOrigin=False):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="rmse", weightCol=None, throughOrigin=False)
        """
        super(RegressionEvaluator, self).__init__()
        # Create the JVM counterpart and register defaults before applying
        # user-supplied keyword arguments.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid)
        self._setDefault(metricName="rmse", throughOrigin=False)
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("1.4.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("1.4.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @since("3.0.0")
    def setThroughOrigin(self, value):
        """
        Sets the value of :py:attr:`throughOrigin`.
        """
        return self._set(throughOrigin=value)
    @since("3.0.0")
    def getThroughOrigin(self):
        """
        Gets the value of throughOrigin or its default value.
        """
        return self.getOrDefault(self.throughOrigin)
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @keyword_only
    @since("1.4.0")
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="rmse", weightCol=None, throughOrigin=False):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="rmse", weightCol=None, throughOrigin=False)
        Sets params for regression evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol, HasWeightCol,
                                        HasProbabilityCol, JavaMLReadable, JavaMLWritable):
    """
    Evaluator for Multiclass Classification, which expects input
    columns: prediction, label, weight (optional) and probabilityCol (only for logLoss).
    >>> scoreAndLabels = [(0.0, 0.0), (0.0, 1.0), (0.0, 0.0),
    ...     (1.0, 0.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)]
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"])
    >>> evaluator = MulticlassClassificationEvaluator()
    >>> evaluator.setPredictionCol("prediction")
    MulticlassClassificationEvaluator...
    >>> evaluator.evaluate(dataset)
    0.66...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
    0.66...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "truePositiveRateByLabel",
    ...     evaluator.metricLabel: 1.0})
    0.75...
    >>> mce_path = temp_path + "/mce"
    >>> evaluator.save(mce_path)
    >>> evaluator2 = MulticlassClassificationEvaluator.load(mce_path)
    >>> str(evaluator2.getPredictionCol())
    'prediction'
    >>> scoreAndLabelsAndWeight = [(0.0, 0.0, 1.0), (0.0, 1.0, 1.0), (0.0, 0.0, 1.0),
    ...     (1.0, 0.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0), (1.0, 1.0, 1.0),
    ...     (2.0, 2.0, 1.0), (2.0, 0.0, 1.0)]
    >>> dataset = spark.createDataFrame(scoreAndLabelsAndWeight, ["prediction", "label", "weight"])
    >>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction",
    ...     weightCol="weight")
    >>> evaluator.evaluate(dataset)
    0.66...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
    0.66...
    >>> predictionAndLabelsWithProbabilities = [
    ...     (1.0, 1.0, 1.0, [0.1, 0.8, 0.1]), (0.0, 2.0, 1.0, [0.9, 0.05, 0.05]),
    ...     (0.0, 0.0, 1.0, [0.8, 0.2, 0.0]), (1.0, 1.0, 1.0, [0.3, 0.65, 0.05])]
    >>> dataset = spark.createDataFrame(predictionAndLabelsWithProbabilities, ["prediction",
    ...     "label", "weight", "probability"])
    >>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction",
    ...     probabilityCol="probability")
    >>> evaluator.setMetricName("logLoss")
    MulticlassClassificationEvaluator...
    >>> evaluator.evaluate(dataset)
    0.9682...
    .. versionadded:: 1.5.0
    """
    # Tunable parameters; Params._dummy() is a placeholder parent replaced
    # per-instance by the Params machinery.
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation "
                       "(f1|accuracy|weightedPrecision|weightedRecall|weightedTruePositiveRate|"
                       "weightedFalsePositiveRate|weightedFMeasure|truePositiveRateByLabel|"
                       "falsePositiveRateByLabel|precisionByLabel|recallByLabel|fMeasureByLabel|"
                       "logLoss)",
                       typeConverter=TypeConverters.toString)
    metricLabel = Param(Params._dummy(), "metricLabel",
                        "The class whose metric will be computed in truePositiveRateByLabel|"
                        "falsePositiveRateByLabel|precisionByLabel|recallByLabel|fMeasureByLabel."
                        " Must be >= 0. The default value is 0.",
                        typeConverter=TypeConverters.toFloat)
    beta = Param(Params._dummy(), "beta",
                 "The beta value used in weightedFMeasure|fMeasureByLabel."
                 " Must be > 0. The default value is 1.",
                 typeConverter=TypeConverters.toFloat)
    eps = Param(Params._dummy(), "eps",
                "log-loss is undefined for p=0 or p=1, so probabilities are clipped to "
                "max(eps, min(1 - eps, p)). "
                "Must be in range (0, 0.5). The default value is 1e-15.",
                typeConverter=TypeConverters.toFloat)
    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0,
                 probabilityCol="probability", eps=1e-15):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0, \
                 probabilityCol="probability", eps=1e-15)
        """
        super(MulticlassClassificationEvaluator, self).__init__()
        # Create the JVM counterpart and register defaults before applying
        # user-supplied keyword arguments.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator", self.uid)
        self._setDefault(metricName="f1", metricLabel=0.0, beta=1.0, eps=1e-15)
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("1.5.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("1.5.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @since("3.0.0")
    def setMetricLabel(self, value):
        """
        Sets the value of :py:attr:`metricLabel`.
        """
        return self._set(metricLabel=value)
    @since("3.0.0")
    def getMetricLabel(self):
        """
        Gets the value of metricLabel or its default value.
        """
        return self.getOrDefault(self.metricLabel)
    @since("3.0.0")
    def setBeta(self, value):
        """
        Sets the value of :py:attr:`beta`.
        """
        return self._set(beta=value)
    @since("3.0.0")
    def getBeta(self):
        """
        Gets the value of beta or its default value.
        """
        return self.getOrDefault(self.beta)
    @since("3.0.0")
    def setEps(self, value):
        """
        Sets the value of :py:attr:`eps`.
        """
        return self._set(eps=value)
    @since("3.0.0")
    def getEps(self):
        """
        Gets the value of eps or its default value.
        """
        return self.getOrDefault(self.eps)
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @since("3.0.0")
    def setProbabilityCol(self, value):
        """
        Sets the value of :py:attr:`probabilityCol`.
        """
        return self._set(probabilityCol=value)
    @since("3.0.0")
    def setWeightCol(self, value):
        """
        Sets the value of :py:attr:`weightCol`.
        """
        return self._set(weightCol=value)
    @keyword_only
    @since("1.5.0")
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0,
                  probabilityCol="probability", eps=1e-15):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="f1", weightCol=None, metricLabel=0.0, beta=1.0, \
                  probabilityCol="probability", eps=1e-15)
        Sets params for multiclass classification evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class MultilabelClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol,
                                        JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Evaluator for Multilabel Classification, which expects two input
    columns: prediction and label.
    >>> scoreAndLabels = [([0.0, 1.0], [0.0, 2.0]), ([0.0, 2.0], [0.0, 1.0]),
    ...     ([], [0.0]), ([2.0], [2.0]), ([2.0, 0.0], [2.0, 0.0]),
    ...     ([0.0, 1.0, 2.0], [0.0, 1.0]), ([1.0], [1.0, 2.0])]
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"])
    ...
    >>> evaluator = MultilabelClassificationEvaluator()
    >>> evaluator.setPredictionCol("prediction")
    MultilabelClassificationEvaluator...
    >>> evaluator.evaluate(dataset)
    0.63...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "accuracy"})
    0.54...
    >>> mlce_path = temp_path + "/mlce"
    >>> evaluator.save(mlce_path)
    >>> evaluator2 = MultilabelClassificationEvaluator.load(mlce_path)
    >>> str(evaluator2.getPredictionCol())
    'prediction'
    .. versionadded:: 3.0.0
    """
    # Tunable parameters; Params._dummy() is a placeholder parent replaced
    # per-instance by the Params machinery.
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation "
                       "(subsetAccuracy|accuracy|hammingLoss|precision|recall|f1Measure|"
                       "precisionByLabel|recallByLabel|f1MeasureByLabel|microPrecision|"
                       "microRecall|microF1Measure)",
                       typeConverter=TypeConverters.toString)
    metricLabel = Param(Params._dummy(), "metricLabel",
                        "The class whose metric will be computed in precisionByLabel|"
                        "recallByLabel|f1MeasureByLabel. "
                        "Must be >= 0. The default value is 0.",
                        typeConverter=TypeConverters.toFloat)
    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="f1Measure", metricLabel=0.0):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="f1Measure", metricLabel=0.0)
        """
        super(MultilabelClassificationEvaluator, self).__init__()
        # Create the JVM counterpart and register defaults before applying
        # user-supplied keyword arguments.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.MultilabelClassificationEvaluator", self.uid)
        self._setDefault(metricName="f1Measure", metricLabel=0.0)
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("3.0.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("3.0.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @since("3.0.0")
    def setMetricLabel(self, value):
        """
        Sets the value of :py:attr:`metricLabel`.
        """
        return self._set(metricLabel=value)
    @since("3.0.0")
    def getMetricLabel(self):
        """
        Gets the value of metricLabel or its default value.
        """
        return self.getOrDefault(self.metricLabel)
    @since("3.0.0")
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)
    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @keyword_only
    @since("3.0.0")
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="f1Measure", metricLabel=0.0):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="f1Measure", metricLabel=0.0)
        Sets params for multilabel classification evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class ClusteringEvaluator(JavaEvaluator, HasPredictionCol, HasFeaturesCol,
                          JavaMLReadable, JavaMLWritable):
    """
    Evaluator for Clustering results, which expects two input
    columns: prediction and features. The metric computes the Silhouette
    measure using the squared Euclidean distance.
    The Silhouette is a measure for the validation of the consistency
    within clusters. It ranges between 1 and -1, where a value close to
    1 means that the points in a cluster are close to the other points
    in the same cluster and far from the points of the other clusters.
    >>> from pyspark.ml.linalg import Vectors
    >>> featureAndPredictions = map(lambda x: (Vectors.dense(x[0]), x[1]),
    ...     [([0.0, 0.5], 0.0), ([0.5, 0.0], 0.0), ([10.0, 11.0], 1.0),
    ...     ([10.5, 11.5], 1.0), ([1.0, 1.0], 0.0), ([8.0, 6.0], 1.0)])
    >>> dataset = spark.createDataFrame(featureAndPredictions, ["features", "prediction"])
    ...
    >>> evaluator = ClusteringEvaluator()
    >>> evaluator.setPredictionCol("prediction")
    ClusteringEvaluator...
    >>> evaluator.evaluate(dataset)
    0.9079...
    >>> ce_path = temp_path + "/ce"
    >>> evaluator.save(ce_path)
    >>> evaluator2 = ClusteringEvaluator.load(ce_path)
    >>> str(evaluator2.getPredictionCol())
    'prediction'
    .. versionadded:: 2.3.0
    """
    # Tunable parameters; Params._dummy() is a placeholder parent replaced
    # per-instance by the Params machinery.
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation (silhouette)",
                       typeConverter=TypeConverters.toString)
    distanceMeasure = Param(Params._dummy(), "distanceMeasure", "The distance measure. " +
                            "Supported options: 'squaredEuclidean' and 'cosine'.",
                            typeConverter=TypeConverters.toString)
    @keyword_only
    def __init__(self, predictionCol="prediction", featuresCol="features",
                 metricName="silhouette", distanceMeasure="squaredEuclidean"):
        """
        __init__(self, predictionCol="prediction", featuresCol="features", \
                 metricName="silhouette", distanceMeasure="squaredEuclidean")
        """
        super(ClusteringEvaluator, self).__init__()
        # Create the JVM counterpart and register defaults before applying
        # user-supplied keyword arguments.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.ClusteringEvaluator", self.uid)
        self._setDefault(metricName="silhouette", distanceMeasure="squaredEuclidean")
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @keyword_only
    @since("2.3.0")
    def setParams(self, predictionCol="prediction", featuresCol="features",
                  metricName="silhouette", distanceMeasure="squaredEuclidean"):
        """
        setParams(self, predictionCol="prediction", featuresCol="features", \
                  metricName="silhouette", distanceMeasure="squaredEuclidean")
        Sets params for clustering evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
    @since("2.3.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("2.3.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @since("2.4.0")
    def setDistanceMeasure(self, value):
        """
        Sets the value of :py:attr:`distanceMeasure`.
        """
        return self._set(distanceMeasure=value)
    @since("2.4.0")
    def getDistanceMeasure(self):
        """
        Gets the value of `distanceMeasure`
        """
        return self.getOrDefault(self.distanceMeasure)
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        return self._set(featuresCol=value)
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
@inherit_doc
class RankingEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol,
                       JavaMLReadable, JavaMLWritable):
    """
    .. note:: Experimental
    Evaluator for Ranking, which expects two input
    columns: prediction and label.
    >>> scoreAndLabels = [([1.0, 6.0, 2.0, 7.0, 8.0, 3.0, 9.0, 10.0, 4.0, 5.0],
    ...     [1.0, 2.0, 3.0, 4.0, 5.0]),
    ...     ([4.0, 1.0, 5.0, 6.0, 2.0, 7.0, 3.0, 8.0, 9.0, 10.0], [1.0, 2.0, 3.0]),
    ...     ([1.0, 2.0, 3.0, 4.0, 5.0], [])]
    >>> dataset = spark.createDataFrame(scoreAndLabels, ["prediction", "label"])
    ...
    >>> evaluator = RankingEvaluator()
    >>> evaluator.setPredictionCol("prediction")
    RankingEvaluator...
    >>> evaluator.evaluate(dataset)
    0.35...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "precisionAtK", evaluator.k: 2})
    0.33...
    >>> ranke_path = temp_path + "/ranke"
    >>> evaluator.save(ranke_path)
    >>> evaluator2 = RankingEvaluator.load(ranke_path)
    >>> str(evaluator2.getPredictionCol())
    'prediction'
    .. versionadded:: 3.0.0
    """
    # Tunable parameters; Params._dummy() is a placeholder parent replaced
    # per-instance by the Params machinery.
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation "
                       "(meanAveragePrecision|meanAveragePrecisionAtK|"
                       "precisionAtK|ndcgAtK|recallAtK)",
                       typeConverter=TypeConverters.toString)
    k = Param(Params._dummy(), "k",
              "The ranking position value used in meanAveragePrecisionAtK|precisionAtK|"
              "ndcgAtK|recallAtK. Must be > 0. The default value is 10.",
              typeConverter=TypeConverters.toInt)
    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="meanAveragePrecision", k=10):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="meanAveragePrecision", k=10)
        """
        super(RankingEvaluator, self).__init__()
        # Create the JVM counterpart and register defaults before applying
        # user-supplied keyword arguments.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.RankingEvaluator", self.uid)
        self._setDefault(metricName="meanAveragePrecision", k=10)
        kwargs = self._input_kwargs
        self._set(**kwargs)
    @since("3.0.0")
    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        return self._set(metricName=value)
    @since("3.0.0")
    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)
    @since("3.0.0")
    def setK(self, value):
        """
        Sets the value of :py:attr:`k`.
        """
        return self._set(k=value)
    @since("3.0.0")
    def getK(self):
        """
        Gets the value of k or its default value.
        """
        return self.getOrDefault(self.k)
    @since("3.0.0")
    def setLabelCol(self, value):
        """
        Sets the value of :py:attr:`labelCol`.
        """
        return self._set(labelCol=value)
    @since("3.0.0")
    def setPredictionCol(self, value):
        """
        Sets the value of :py:attr:`predictionCol`.
        """
        return self._set(predictionCol=value)
    @keyword_only
    @since("3.0.0")
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="meanAveragePrecision", k=10):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="meanAveragePrecision", k=10)
        Sets params for ranking evaluator.
        """
        kwargs = self._input_kwargs
        return self._set(**kwargs)
if __name__ == "__main__":
    # Run the module's doctests against a local two-core Spark session.
    import doctest
    import tempfile
    import pyspark.ml.evaluation
    from pyspark.sql import SparkSession
    globs = pyspark.ml.evaluation.__dict__.copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.evaluation tests")\
        .getOrCreate()
    globs['spark'] = spark
    # The doctests save/load evaluators under temp_path; removed below.
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        # Always clean up the temporary directory, even when doctests raise.
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        # Non-zero exit so CI notices doctest failures.
        sys.exit(-1)
| |
#!/usr/bin/env python3
"""An rsync wrapper to simplify syncing multiple filesystems from a master.
For each DIR argument given, the local path will be transferred to the same
absolute path on the remote hosts listed in the config file. For example,
"co.py /home/user/mydir" will transfer the contents of the "mydir" directory to
the matching path, "/home/user", on each of the remote hosts listed.
"""
import os
import sys
import argparse
import enum
import subprocess
import configparser
import re
VERSION = "0.1"
VERBOSE = False
DEBUG = False
def build_rsync_command(source, dest, host, excludes=None, erase=True,
                        output_flags=None, reverse=False):
    """Assemble the rsync argv list for a single transfer.

    source/dest are local and remote paths, host is the remote host name.
    excludes is an optional list of rsync exclude patterns, output_flags an
    optional list of extra rsync flags.  When erase is true, --delete is
    included.  When reverse is true the remote side is the source instead
    of the destination.  Returns the command as a list of strings.
    """
    cmd = ['rsync', '--inplace', '-ha']
    if erase:
        cmd.append('--delete')
    for pattern in (excludes or []):
        cmd.extend(['--exclude', pattern])
    cmd.extend(output_flags or [])
    # The remote endpoint is "host:path"; which side is remote depends on
    # the transfer direction.
    remote = '%s:%s' % (host, source if reverse else dest)
    if reverse:
        cmd.extend([remote, dest])
    else:
        cmd.extend([source, remote])
    return cmd
def sanitize_source(path):
    """Strip a single trailing '/' from the source path, if present."""
    if path.endswith('/'):
        return path[:-1]
    return path
def sanitize_dest(path):
    """Append a trailing '/' to the destination path when it lacks one."""
    if not path.endswith('/'):
        path += '/'
    return path
def match_host(match_string, config):
    """Return the first configured hostname matching *match_string*.

    The stub matches (case-insensitively) either the host's index or a
    substring of its name, as promised by the --host help text.  It is
    treated as a literal substring — the previous implementation fed it to
    re.search(), so an argument containing regex metacharacters (e.g.
    "c++") crashed the lookup.  Returns None when nothing matches.
    """
    needle = match_string.lower()
    for index, hostname in config.iter_hosts():
        if needle in str(index) or needle in hostname.lower():
            return hostname
    return None
def main():
    """Entry point: sync each source directory to the configured hosts.

    Returns a process exit status: 0 on success, 1 on usage/config errors.
    """
    # Parse command line arguments
    command_parser = CommandParser()
    args = command_parser.parse()
    dprint(args)
    # Load the configuration file
    cfg = Config(filename=args.config)
    cfg.load()
    # Perform the list hosts action and exit if the option is present
    if args.list_hosts:
        print("Hosts in config: '%s'" % cfg.filename)
        for index, hostname in cfg.iter_hosts():
            print("  %s. %s: %s" % (index, hostname, cfg.hosts[hostname]))
        return 0
    # Fall back to configured defaults for any option not given on the
    # command line.  (The previous version left dest_arg/host_arg unbound
    # — a NameError — whenever the defaults branch was taken but -D/-H
    # WAS supplied.)
    if not args.dir:
        if not args.reverse:
            dir_arg = cfg.defaults_dir
            dest_arg = args.dest if args.dest else cfg.defaults_dest
            host_arg = args.host
        else:
            dir_arg = cfg.reverse_defaults_dir
            dest_arg = args.dest if args.dest else cfg.reverse_defaults_dest
            host_arg = args.host if args.host else cfg.reverse_defaults_host
    else:
        dir_arg = args.dir
        dest_arg = args.dest
        host_arg = args.host
    if not dir_arg:
        print('No directories given and no default configured; '
              'nothing to do.')
        return 1
    if isinstance(dir_arg, str):
        # Config defaults come back from get_string() as a single string;
        # iterating a bare string below would walk it character by character.
        dir_arg = [dir_arg]
    # determine the host destinations for the transfer
    hosts = sorted(cfg.hosts.keys())
    if host_arg:
        matched = match_host(host_arg, cfg)
        if matched is None:
            # Fail early instead of letting cfg.hosts[None] raise later.
            print('No host matching "%s" found in the config file.' %
                  host_arg)
            return 1
        hosts = [matched]
    if args.reverse and len(hosts) != 1:
        print('Cannot reverse transfer direction for multiple hosts. '
              'Please specify a single host using "-H" and try again.')
        return 1
    dprint('{dir_arg: %s, dest_arg: %s, hosts: %s}' % (dir_arg, dest_arg, hosts))
    # For each path, and each host perform the transfer
    for argument in dir_arg:
        if not os.path.exists(argument):
            print('Path "%s" does not appear to exist, skipping...' %
                  argument)
            continue
        argument = os.path.abspath(os.path.relpath(argument))
        source_path = sanitize_source(argument)
        if not dest_arg:
            # Default destination: the parent of the source path, so the
            # remote host ends up with the same absolute path.
            parent, _ = os.path.split(source_path)
            dest_path = sanitize_dest(parent)
        else:
            dest_path = dest_arg
        excludes = cfg.rsync_excludes if not args.no_excludes else []
        for hostname in hosts:
            print('Transfering "%s" to "%s" on %s...' %
                  (source_path, dest_path, hostname))
            command = build_rsync_command(
                source_path, dest_path, cfg.hosts[hostname], excludes,
                args.erase, cfg.rsync_output_flags, args.reverse)
            dprint('Running command: %s' % ' '.join(command))
            if not args.dry_run:
                subprocess.call(command, stderr=subprocess.PIPE)
    return 0
class Config(object):
    """Wrapper interface for an INI Config Parser object."""
    # Default Config Locations: Home dir, current dir, script's dir.
    default_filename = 'co.cfg'
    # os.path.expanduser('~') works even when $HOME is unset (e.g. on
    # Windows), unlike the previous os.environ['HOME'] lookup which raised
    # KeyError at import time.
    default_paths = [os.path.join(os.path.expanduser('~'),
                                  '.%s' % default_filename),
                     os.path.join('.', default_filename),
                     os.path.join(os.path.dirname(sys.argv[0]),
                                  default_filename)]

    def __init__(self, filename=None):
        """Use *filename* when given, otherwise search the default paths."""
        self.filename = filename
        if not self.filename:
            self.filename = self.default_paths
        self._parser = configparser.ConfigParser()

    def load(self):
        """Read the config file(s); missing files are silently skipped."""
        self._parser.read(self.filename)

    def _has_option(self, sect, opt):
        """Return True when section *sect* exists and contains *opt*."""
        return (self._parser.has_section(sect)
                and opt in self._parser.options(sect))

    def get_string(self, sect, opt):
        """Return the raw string value of *opt*, or None when absent."""
        if not self._has_option(sect, opt):
            return None
        return self._parser.get(sect, opt)

    def get_list(self, sect, opt):
        """Return the space-separated value of *opt* as a list, or None."""
        if not self._has_option(sect, opt):
            return None
        return self._parser.get(sect, opt).split(' ')

    @property
    def hosts(self):
        """Mapping of host alias -> address from the [hosts] section."""
        sect = 'hosts'
        if not self._parser.has_section(sect):
            return {}
        return {opt: self._parser.get(sect, opt)
                for opt in self._parser.options(sect)}

    def iter_hosts(self):
        """Yield (index, hostname) pairs in sorted hostname order."""
        for index, hostname in enumerate(sorted(self.hosts.keys())):
            yield (index, hostname)

    @property
    def rsync_excludes(self):
        """Exclude patterns for rsync, or None when not configured."""
        return self.get_list('rsync_opts', 'excludes')

    @property
    def rsync_output_flags(self):
        """Extra rsync output flags, or None when not configured."""
        return self.get_list('rsync_opts', 'output_flags')

    @property
    def defaults_dir(self):
        """Default source dir for forward transfers (string or None)."""
        return self.get_string('defaults', 'dir')

    @property
    def defaults_dest(self):
        """Default destination for forward transfers (string or None)."""
        return self.get_string('defaults', 'dest')

    @property
    def reverse_defaults_dir(self):
        """Default source dir for reverse transfers (string or None)."""
        return self.get_string('reverse_defaults', 'dir')

    @property
    def reverse_defaults_dest(self):
        """Default destination for reverse transfers (string or None)."""
        return self.get_string('reverse_defaults', 'dest')

    @property
    def reverse_defaults_host(self):
        """Default host for reverse transfers (string or None)."""
        return self.get_string('reverse_defaults', 'host')
class CommandParser(object):
    """Builds the argparse interface and parses the command line."""

    def __init__(self):
        self.arg_parser = argparse.ArgumentParser(description=__doc__)

    def add_options(self):
        """Register the general transfer options."""
        self.arg_parser.add_argument(
            '--version', help='Print the version and exit.', action='version',
            version='%(prog)s {}'.format(VERSION))
        DebugAction.add_parser_argument(self.arg_parser)
        VerboseAction.add_parser_argument(self.arg_parser)
        self.arg_parser.add_argument(
            '-n', '--dry-run', action='store_true', dest='dry_run',
            help="Print full output but do not run rsync.")
        self.arg_parser.add_argument(dest='dir', metavar='DIR', nargs='*')
        self.arg_parser.add_argument(
            '-c', '--config', action='store', dest='config', metavar='CONFIG',
            help=('Specify a config file to use. Default config paths: %s' %
                  ' '.join(Config.default_paths)))
        self.arg_parser.add_argument(
            '--no-excludes', dest='no_excludes', action='store_true',
            help="Don't use the rsync excludes option from the config file.")
        # NOTE: the rsync flag actually appended by build_rsync_command is
        # --delete; the old help text advertised a nonexistent "--erase"
        # rsync option.
        self.arg_parser.add_argument(
            '-e', '--erase', dest='erase', action='store_true',
            help="Include the --delete option in the rsync commands.")
        self.arg_parser.add_argument(
            '-D', '--dest', dest='dest', metavar="DESTINATION",
            help=("Specify an alternate destination path. Otherwise the "
                  "same source path is used on each host destination."))
        self.arg_parser.add_argument(
            '-r', '--reverse', dest='reverse', action='store_true',
            help=('Reverse the transfer direction. Requires 1 host to be '
                  'specified with the "--host" option.'))

    def add_host_options(self):
        """Register host-selection options."""
        self.arg_parser.add_argument(
            '-H', '--host', dest='host', metavar='HOST',
            help=('Interact with only a single host from the config file. '
                  'Argument can be index of the host, or a substring '
                  'matching the name in the config file.'))
        self.arg_parser.add_argument(
            '-l', '--list-hosts', dest='list_hosts', action='store_true',
            help='List the hosts available in the config file.')

    def set_defaults(self):
        """Give every option a defined default value."""
        self.arg_parser.set_defaults(dir=None, config=None, no_excludes=False,
                                     erase=False, dry_run=False, dest=None,
                                     host=None, list_hosts=False)

    def parse(self):
        """Register all options and parse sys.argv."""
        self.add_options()
        self.add_host_options()
        self.set_defaults()
        return self.arg_parser.parse_args()
def dprint(msg):
    """Print *msg* only when the global DEBUG flag is enabled."""
    if not DEBUG:
        return
    print(msg)
def vprint(msg):
    """Print *msg* only when the global VERBOSE flag is enabled."""
    if not VERBOSE:
        return
    print(msg)
class DebugAction(argparse.Action):
    """Enable the debugging output mechanism."""
    short_flag = '-d'
    flag = '--debug'
    help = 'Enable debugging output.'

    @classmethod
    def add_parser_argument(cls, parser):
        """Attach this action's flag(s) to *parser*."""
        flags = [cls.flag]
        if getattr(cls, 'short_flag', None):
            flags.insert(0, cls.short_flag)
        parser.add_argument(*flags, help=cls.help, action=cls)

    def __init__(self, option_strings, dest, **kwargs):
        # A zero-argument flag defaulting to False until seen on the line.
        super(DebugAction, self).__init__(option_strings, dest, nargs=0,
                                          default=False, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        """Flip the module-level DEBUG switch and record it on the namespace."""
        global DEBUG
        DEBUG = True
        setattr(namespace, self.dest, True)
class VerboseAction(DebugAction):
    """Enable the verbose output mechanism."""
    short_flag = '-v'
    flag = '--verbose'
    help = 'Enable verbose output.'

    def __call__(self, parser, namespace, values, option_string=None):
        """Flip the module-level VERBOSE switch and record it on the namespace."""
        global VERBOSE
        VERBOSE = True
        setattr(namespace, self.dest, True)
if __name__ == '__main__':
    try:
        # Propagate main()'s return value as the process exit status.
        # (The previous code caught SystemExit and re-exited with 0, which
        # silently converted every failure exit — e.g. main() returning 1 —
        # into success.)
        sys.exit(main())
    except KeyboardInterrupt:
        print('...interrupted by user, exiting.')
        sys.exit(1)
    except Exception as exc:
        if DEBUG:
            raise
        print('Unhandled Error:\n{}'.format(exc))
        sys.exit(1)
| |
# A Python 2.7 library to simulate AB tests and analyze results.
###############################################################################
# Copyright 2016 Intuit
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import math, time, numpy, pandas
import scipy.stats
import matplotlib.pyplot as plt
from pandas import DataFrame, Series
### estimate and confidence interval methods for binomial distributions
def WaldEstimate(successes, trials, significance=0.05):
    """Wald (normal-approximation) estimate for a binomial proportion.

    Returns (estimate, half_width) at the given significance level.
    """
    z_crit = scipy.stats.norm.ppf(1 - significance / 2.)
    p_hat = 1. * successes / trials
    half_width = z_crit * math.sqrt(p_hat * (1 - p_hat) / trials)
    return p_hat, half_width
# Agresti-Coull
def ACEstimate(successes, trials, significance=0.05):
    """Agresti-Coull estimate for a binomial proportion.

    Adds z^2/2 pseudo-successes and z^2 pseudo-trials before computing the
    usual normal-approximation interval; returns (estimate, half_width).
    """
    z_crit = scipy.stats.norm.ppf(1 - significance / 2.)
    adj_trials = trials + z_crit ** 2
    p_tilde = 1. * (successes + z_crit ** 2 / 2.) / adj_trials
    half_width = z_crit * math.sqrt(p_tilde * (1 - p_tilde) / adj_trials)
    return p_tilde, half_width
### scaling significance to account for multiple test reads
# function used to fit simulated results
def modifiedSignificance(numImpressions, significance, firstLook, scale):
    """Effective significance after repeatedly reading a running test.

    Treats the reads as independent looks taken at geometrically spaced
    impression counts (firstLook, firstLook*scale, ...) and returns the
    probability of at least one false rejection across those reads.
    """
    n_reads = (numpy.log(1. * numImpressions / firstLook) /
               numpy.log(scale)) + 1
    return 1 - (1 - significance) ** n_reads
### helper methods
# if the null hypothesis is rejected
def rejectNull(estimate, rate):
    """Return True when *rate* lies outside the confidence interval
    described by *estimate*, a (value, half_interval) pair."""
    center, spread = estimate[0], estimate[1]
    return not (center - spread <= rate <= center + spread)
### core simulation
# simulates a single-proportion Z-test
# calculates type I error for a single test read (at end)
# and cumulative error for continuous test reads (after every impression)
def simulate(rates, significances, impressions, numTrials, firstLook=None,
             estimateFunction=WaldEstimate, seed=None):
    """ simulate a single-proportion Z-test

    Args:
        rates (list): success rates
        significances (list): significance values (1 - confidence)
        impressions (int or list): maximum impressions or list of number of
                                   impressions
        numTrials (int): number of independent simulations to aggregate over
        firstLook (int, optional): first impression at which the experiment
                                   is evaluated for continuous evaluation
                                   (defaults to None, which disables the
                                   continuous-read output entirely)
        estimateFunction (function): binomial approximation to use
                                     (defaults to Wald)
        seed (int, optional): seed for random number generation
                              (defaults to current time)

    Returns:
        avgRejects (DataFrame): simulated single test read at end
        avgAnyRejects (DataFrame): simulated continuous test read after every
                                   impression (None unless firstLook is set)

    Both DataFrames contain the estimate and uncertainty on the type I error
    (incorrect rejection of null hypothesis) for each rate, significance, and
    impression value. Results are aggregated across numTrials independent
    experiments.

    NOTE(review): this is Python 2-era code — DataFrame.ix was removed in
    pandas 1.0, and Series(zip(...)) relies on zip() returning a list, so
    this will not run unmodified on Python 3 / modern pandas.
    """
    trials = range(numTrials)
    base = [rates, significances, trials]
    # one column per (rate, significance, trial) combination
    mi = pandas.MultiIndex.from_product(base, names=['rate', 'significance',
                                                     'trial'])
    if seed is None:
        numpy.random.seed(int(time.time()))
    else:
        numpy.random.seed(seed)
    if type(impressions) == int:
        points = range(1, impressions + 1)
    else:
        points = impressions
    avgRejects = None
    avgAnyRejects = None
    for n in points:
        if n <= 0:
            raise ValueError("All values in impressions must be positive.")
        # uniform [0, 1) draws: one row per impression, one column per
        # (rate, significance, trial) combination
        draws = DataFrame(numpy.random.random([n, len(rates) *
                                               len(significances) *
                                               len(trials)]),
                          columns=mi)
        draws.index = range(1, n + 1)
        successes = draws.copy()
        rejects = draws.copy()
        # a draw below the rate counts as a success (Bernoulli trial)
        for rate in rates:
            successes[rate] = draws[rate].applymap(lambda x: int(x < rate))
        cumSuccesses = successes.apply(numpy.core.fromnumeric.cumsum, raw=True)
        cumImpressions = successes.index.values
        # evaluate the test after every impression: cell is 1 when the
        # confidence interval around the running estimate excludes the
        # true rate (a false rejection, since the null is true here)
        for rate in rates:
            for sig in significances:
                for trial in trials:
                    vals = Series(zip(cumSuccesses.loc[:, (rate, sig, trial)].values,
                                      cumImpressions))
                    vals.index = cumImpressions
                    rejects.loc[:, (rate, sig, trial)] = vals.apply(lambda x: \
                        int(rejectNull(estimateFunction(x[0], x[1], sig), rate)))
        if firstLook is not None:
            # 1 when ANY read from firstLook onwards rejected the null
            anyRejects = rejects.ix[firstLook:].max()
        # apply binomial approximation to estimate type I error rate
        # (estimateFunction is called here with its default significance)
        if avgRejects is None:
            avgRejects = rejects[-1:]. \
                groupby(axis=1, level=['rate', 'significance']). \
                sum(). \
                applymap(lambda x: estimateFunction(x, numTrials))
        else:
            avgRejects.ix[n] = rejects[-1:]. \
                groupby(axis=1, level=['rate', 'significance']). \
                sum(). \
                applymap(lambda x: estimateFunction(x, numTrials)). \
                values[0]
        # apply binomial approximation to estimate cumulative type I error rate
        if firstLook is not None:
            if avgAnyRejects is None:
                avgAnyRejects = DataFrame(anyRejects. \
                    groupby(level=['rate', 'significance']). \
                    sum(). \
                    map(lambda x: estimateFunction(x, numTrials))). \
                    transpose()
                avgAnyRejects.index = avgRejects.index.copy()
            else:
                avgAnyRejects.ix[n] = anyRejects. \
                    groupby(level=['rate', 'significance']). \
                    sum(). \
                    map(lambda x: estimateFunction(x, numTrials)). \
                    values
    return avgRejects, avgAnyRejects
### plotting
def plotRejects(avgRejects, avgAnyRejects):
    """Plot single-read (solid) vs continuous-read (dashed, 'x' marker)
    type I error rates against impression count.

    avgRejects / avgAnyRejects: DataFrames as returned by simulate() —
    cells hold (estimate, interval) tuples and columns are indexed by
    (rate, significance).  Calls plt.show(), so this blocks until the
    figure window is closed.
    """
    impressions = avgRejects.index
    rates = avgRejects.columns.levels[0]
    sigs = avgRejects.columns.levels[1]
    # one base colour per rate; brightness level distinguishes significance
    colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (0, 1, 1), (1, 0, 1),
              (1, 1, 1)]
    levels = [(val + 1.)/len(sigs) for val in range(len(sigs))]
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(1, 1, 1)
    # NOTE(review): rateIndex is assigned but never used below.
    rateIndex = 0
    for (rate, baseColor) in zip(rates, colors[:len(rates)]):
        for (sig, level) in zip(sigs, levels):
            color = tuple(val*level for val in baseColor)
            # x[0] of each (estimate, interval) cell is the point estimate
            rejectsVals = avgRejects[rate][sig].apply(lambda x: x[0]).values
            anyRejectsVals = avgAnyRejects[rate][sig]. \
                apply(lambda x: x[0]).values
            ax.plot(impressions, rejectsVals, color=color,
                    label="rate: %.3f; significance: %.3f" % (rate, sig))
            ax.plot(impressions, anyRejectsVals, color=color, marker='x',
                    ls='--')
    # dotted horizontal reference line at each nominal significance level
    for sig in sigs:
        ax.plot(impressions, [sig]*len(impressions), color='k', ls=':')
    ax.set_xlim(0, max(impressions))
    ax.set_ylim(0, 1)
    ax.set_title('Average Reject Rate', fontsize=24)
    ax.set_xlabel('# Impressions', fontsize=20)
    ax.set_ylabel('% Rejects', fontsize=20)
    ax.legend(loc=1, fontsize=18)
    plt.tick_params(axis='both', which='major', labelsize=16)
    plt.show()
| |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010-2011, Monash e-Research Centre
# (Monash University, Australia)
# Copyright (c) 2010-2011, VeRSI Consortium
# (Victorian eResearch Strategic Initiative, Australia)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the VeRSI, the VeRSI Consortium members, nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
The METS document parser.
Experiment/Dataset/Datafile metadata of this format can be easily parsed
by this module...
<trd:saxdatafile xmlns:trd="http://www.tardis.edu.au/schemas/saxs/datafile/2010/08/10">
<trd:countingSecs>10.0</trd:countingSecs>
<trd:io>274389</trd:io>
<trd:ioBgnd>0</trd:ioBgnd>
<trd:it>284</trd:it>
<trd:itBgnd>0</trd:itBgnd>
<trd:ibs>665765</trd:ibs>
<trd:ibsBgnd>0</trd:ibsBgnd>
<trd:timeStampString>Fri Apr 16 03:53:30 2010</trd:timeStampString>
<trd:positionerString>UDEF1_2_PV1_2_3_4_5</trd:positionerString>
<trd:positionerValues>24.7410 24.9764 20.000 12.000 26.322 2.0007 1.2999</trd:positionerValues>
</trd:saxdatafile>
As you can see, the structure is quite flat. The structure above is the
recommended way of defining metadata fields for Experiment/Dataset/Datafile.
If however the metadata structure will require going down into a number of
descendants below the child elements of the main metadata element, you'll need
to provide a custom METS handler for it. The MX schema is a good example of a
schema that does not conform to the key-value pair format this parser module
recommends.
<trd:mxdatafile xmlns:trd="http://www.tardis.edu.au/schemas/trdDatafile/1">
<trd:imageType>R-AXIS</trd:imageType>
<trd:oscillationRange>
<trd:start>35.0</trd:start>
<trd:end>35.5</trd:end>
</trd:oscillationRange>
<trd:exposureTime>3.5166667</trd:exposureTime>
<trd:detectorSN>N/A</trd:detectorSN>
<trd:xrayWavelength>1.5418</trd:xrayWavelength>
<trd:directBeamXPos>149.59</trd:directBeamXPos>
<trd:directBeamYPos>150.5</trd:directBeamYPos>
<trd:detectorDistance>220.0</trd:detectorDistance>
<trd:imageSizeX>3000.0</trd:imageSizeX>
<trd:imageSizeY>3000.0</trd:imageSizeY>
<trd:pixelSizeX>0.1</trd:pixelSizeX>
<trd:pixelSizeY>0.1</trd:pixelSizeY>
<trd:twoTheta>0.0</trd:twoTheta>
</trd:mxdatafile>
.. moduleauthor:: Gerson Galang <gerson.galang@versi.edu.au>
"""
import hashlib
import logging
import re
from xml.sax import SAXParseException, ContentHandler
from xml.sax.handler import feature_namespaces
from xml.sax import make_parser
from tardis.tardis_portal import metsstruct
from tardis.tardis_portal import models
from tardis.tardis_portal.metshandler import store_metadata_value
from tardis.tardis_portal.staging import \
get_sync_root, get_sync_url_and_protocol
from django.conf import settings
logger = logging.getLogger(__name__)
class MetsDataHolder():
    '''An instance of this class is used by MetsExperimentStructCreator and
    MetsMetadataInfoHandler to pass information between each other.
    '''
    # holds the experiment Id that the DB has provided after the experiment
    # has been saved
    experimentDatabaseId = None
    # maps a METS metadata (ADMID) id to the list of experiment/dataset/
    # datafile struct objects that reference it; filled in by the first
    # parse (MetsExperimentStructCreator)
    metadataMap = None
    # maps a METS struct (DMDID) id to its Experiment/Dataset struct object;
    # also filled in by the first parse
    metsStructMap = None
class MetsExperimentStructCreator(ContentHandler):
    '''The METS document parser which creates the experiment structure before
    the MetsMetadataInfoHandler starts saving the metadata of the experiments,
    datasets, and datafiles in the database.
    '''
    def __init__(self, holder):
        # the shared MetsDataHolder this first pass fills in
        self.holder = holder
        self.experiment = None
        self.inFileGrp = False
        self.processExperimentStruct = False
        self.processDatasetStruct = False
        self.datafile = None
        self.dataset = None
        self.inStructMap = False
        # holds a dictionary of objects (experiment, dataset, datafiles)
        # that contains metadata information. this makes lookup on the objects
        # based on the mets metadata id easier later on when the second parse
        # is done.
        self.metadataMap = {}
        self.metsStructMap = {}
    def startElementNS(self, name, qname, attrs):
        '''First-pass SAX handler: builds Datafile/Experiment/Dataset struct
        objects from fileGrp/structMap elements and indexes them by their
        METS metadata ids.
        '''
        # just get the element name without the namespace
        elName = name[1]
        if elName == 'fileGrp':
            # flag that we are now in the fileGrp element which encapsulates
            # the file listing
            self.inFileGrp = True
            self.datafilesMap = {}
        elif elName == 'file' and self.inFileGrp:
            fileName = _getAttrValueByQName(attrs, 'OWNERID')
            fileId = _getAttrValueByQName(attrs, 'ID')
            fileSize = _getAttrValueByQName(attrs, 'SIZE')
            fileMetadataIds = _getAttrValueByQName(attrs, 'ADMID')
            fileChecksumType = _getAttrValueByQName(attrs, 'CHECKSUMTYPE')
            fileChecksum = _getAttrValueByQName(attrs, 'CHECKSUM')
            # instantiate the datafile
            # (the "x is not None and x.split() or None" construct is the
            # Python 2-era spelling of "x.split() if x is not None else None")
            self.datafile = metsstruct.Datafile(
                fileId, fileName, fileSize, fileMetadataIds is not None and
                fileMetadataIds.split() or None,
                fileChecksumType, fileChecksum)
            # add an entry for this datafile in the metadataMap so we can
            # easily look it up later on when we do our second parse
            if fileMetadataIds is not None:
                for fileMetadataId in fileMetadataIds.split():
                    if fileMetadataId in self.metadataMap:
                        self.metadataMap[fileMetadataId].append(self.datafile)
                    else:
                        self.metadataMap[fileMetadataId] = [self.datafile]
        elif elName == 'FLocat' and self.inFileGrp and \
                _getAttrValueByQName(attrs, 'LOCTYPE') == 'URL':
            # add the URL info to the datafile
            fileUrl = _getAttrValue(attrs,
                                    ('http://www.w3.org/1999/xlink', 'href'))
            self.datafile.url = fileUrl
        elif elName == 'structMap':
            self.inStructMap = True
        elif elName == 'div' and \
                self.inStructMap and \
                _getAttrValueByQName(attrs, 'TYPE') == 'investigation':
            # investigation maps to an experiment in the METS world
            self.processExperimentStruct = True
            # instantiate a new experiment
            experimentId = _getAttrValueByQName(attrs, 'DMDID')
            experimentMetadataIds = \
                _getAttrValueByQName(attrs, 'ADMID')
            self.experiment = metsstruct.Experiment(experimentId,
                experimentMetadataIds is not None and
                experimentMetadataIds.split() or None)
            # add an entry for this experiment in the metadataMap so we can
            # easily look it up later on when we do our second parse
            if experimentMetadataIds is not None:
                for experimentMetadataId in experimentMetadataIds.split():
                    if experimentMetadataId in self.metadataMap:
                        self.metadataMap[
                            experimentMetadataId].append(self.experiment)
                    else:
                        self.metadataMap[experimentMetadataId] = \
                            [self.experiment]
            # we'll save all the div element entries in the metsStructMap so
            # we can easily look them up when we do our second parse
            self.metsStructMap[experimentId] = self.experiment
        elif elName == 'div' and \
                self.processExperimentStruct and \
                _getAttrValueByQName(attrs, 'TYPE') == 'dataset':
            self.processDatasetStruct = True
            # instantiate a new dataset
            datasetId = _getAttrValueByQName(attrs, 'DMDID')
            datasetMetadataIds = _getAttrValueByQName(attrs, 'ADMID')
            self.dataset = metsstruct.Dataset(datasetId,
                datasetMetadataIds is not None and datasetMetadataIds.split()
                or None)
            # we'll also link the newly created dataset with the current
            # experiment
            self.dataset.experiment = self.experiment
            # add an entry for this dataset in the metadataMap so we can
            # easily look it up later on when we do our second parse
            if datasetMetadataIds is not None:
                for datasetMetadataId in datasetMetadataIds.split():
                    if datasetMetadataId in self.metadataMap:
                        self.metadataMap[
                            datasetMetadataId].append(self.dataset)
                    else:
                        self.metadataMap[datasetMetadataId] = [self.dataset]
            # we'll save all the div element entries in the metsStructMap so
            # we can easily look them up when we do our second parse
            self.metsStructMap[datasetId] = self.dataset
        elif elName == 'fptr' and self.processDatasetStruct:
            fileId = _getAttrValueByQName(attrs, 'FILEID')
            # now that we have the fileId attribute, let's add the actual
            # datafile object from the datafilesMap into the dataset
            self.dataset.datafiles.append(self.datafilesMap[fileId])
            # we'll also need to link this datafile with its dataset holder
            self.datafilesMap[fileId].dataset = self.dataset
    def endElementNS(self, name, qname):
        '''First-pass SAX close handler: finalises datafiles/datasets and
        publishes the built maps to the shared holder.
        '''
        # just get the element name without the namespace
        elName = name[1]
        if elName == 'fileGrp':
            self.inFileGrp = False
        elif elName == 'file' and self.inFileGrp:
            # save the datafile in the map then reset it to 'None'
            self.datafilesMap[self.datafile.id] = self.datafile
            self.datafile = None
        elif elName == 'structMap':
            # technically, we've finished doing our first pass here.
            # at this point we can assume that we already have the experiment
            # structure available that we can use on our second pass
            self.holder.metsStructMap = self.metsStructMap
            self.holder.metadataMap = self.metadataMap
            self.inStructMap = False
            # let's try to have the datafilesMap garbage collected
            self.datafilesMap = None
        elif elName == 'div' and self.processDatasetStruct:
            self.experiment.datasets.append(self.dataset)
            self.dataset = None
            self.processDatasetStruct = False
        elif elName == 'div' and self.processExperimentStruct:
            self.processExperimentStruct = False
    def characters(self, chars):
        # can't think of anything to do in here at the moment...
        pass
class MetsMetadataInfoHandler(ContentHandler):
'''This parser does the actual database ingestion.
After the MetsExperimentStructCreator creates the experiment structure,
this parser goes through the METS document again to look up which file
the metadata belongs to, links the file and metadata and saves an entry of
the datafile in the database.
'''
    def __init__(self, holder, tardisExpId, createdBy, syncRootDir):
        '''holder: the shared MetsDataHolder filled in by the first parse;
        tardisExpId: id of an existing experiment to update, or a falsy
        value to create a new Experiment row; createdBy: the user recorded
        as the experiment's creator; syncRootDir: staging root directory
        (stored for later use — its consumer is not visible in this excerpt).
        '''
        self.holder = holder
        self.tardisExpId = tardisExpId
        self.createdBy = createdBy
        self.syncRootDir = syncRootDir
        # nesting/state flags driven by startElementNS/endElementNS as the
        # parser walks the document
        self.inDmdSec = False
        self.inName = False
        self.inInstitution = False
        self.inAmdSec = False
        self.inTechMd = False
        self.grabInstitution = False
        self.grabTitle = False
        # the struct object (Experiment/Dataset) currently being populated
        self.metsObject = None
        self.grabMightBeAuthor = False
        self.grabRoleTerm = False
        self.mightBeAuthor = None
        self.grabExperimentUrl = False
        self.grabStartTime = False
        self.grabEndTime = False
        self.processExperimentStruct = False
        self.processDatasetStruct = False
        self.processMetadata = False
        # this will hold the techMD ID
        self.metadataId = None
        self.institution = None
        self.grabAbstract = False
        # a flag to tell if we are now inside techMD's xmlData element
        self.inXmlData = False
        # holds the current direct xmlData child element we are processing
        self.xmlDataChildElement = None
        self.parameterName = None
        # a cache of the current experiment model object we are processing
        self.modelExperiment = None
        # a cache of the current dataset model object we are processing
        self.modelDataset = None
        # a cache of the current datafile being processed
        self.modelDatafile = None
        # this will hold the URI of the experiment/dataset/datafile
        # metadata schema
        self.elementNamespace = None
        # this will be used to temporarily hold the metadata for the
        # experiment, dataset or datafile
        self.tempMetadataHolder = {}
        # holds a quick lookup table of all the instantiated Dataset objects
        # that the new instantiated datafiles will be linked to
        self.datasetLookupDict = {}
        # the custom parser (or handler) to use for the given metadata
        self.customHandler = None
    def startElementNS(self, name, qname, attrs):
        '''Second-pass SAX handler: flips the grab*/in* flags that tell the
        later callbacks which piece of metadata the upcoming text nodes and
        closing tags belong to.
        '''
        # just get the element name without the namespace
        elName = name[1]
        if elName == 'dmdSec':
            self.inDmdSec = True
            structId = _getAttrValueByQName(attrs, 'ID')
            # look up the Experiment/Dataset struct built during the first
            # parse (MetsExperimentStructCreator)
            self.metsObject = self.holder.metsStructMap[structId]
            metsObjectClassName = self.metsObject.__class__.__name__
            if metsObjectClassName == 'Experiment':
                self.processExperimentStruct = True
            elif metsObjectClassName == 'Dataset':
                self.processDatasetStruct = True
            else:
                # we'll definitely have only either Experiment or Dataset
                # object in the metsStructMap, if there are other type of
                # objects that got saved in the map, we'll throw an exception
                # NOTE(review): SAXParseException normally takes
                # (msg, exception, locator) arguments; raising the bare class
                # here would fail with a TypeError instead — confirm intent.
                raise SAXParseException
        elif elName == 'title' and self.inDmdSec:
            self.grabTitle = True
        elif elName == 'startTime' and self.processExperimentStruct:
            self.grabStartTime = True
        elif elName == 'endTime' and self.processExperimentStruct:
            self.grabEndTime = True
        elif elName == 'url' and self.processExperimentStruct:
            self.grabExperimentUrl = True
        elif elName == 'abstract' and self.processExperimentStruct:
            self.grabAbstract = True
            # start with an empty description; text is accumulated later
            self.metsObject.description = ''
        elif elName == 'name' and self.processExperimentStruct:
            self.inName = True
        elif elName == 'namePart' and self.inName:
            self.grabMightBeAuthor = True
        elif elName == 'roleTerm' and self.mightBeAuthor is not None:
            self.grabRoleTerm = True
        elif elName == 'agent':
            agentRole = _getAttrValueByQName(attrs, 'ROLE')
            agentType = _getAttrValueByQName(attrs, 'TYPE')
            if agentRole == 'DISSEMINATOR' and agentType == 'ORGANIZATION':
                self.inInstitution = True
        elif elName == 'name' and self.inInstitution:
            self.grabInstitution = True
        elif elName == 'amdSec':
            # let's start processing the metadata info..
            self.inAmdSec = True
        elif elName == 'techMD' and self.inAmdSec:
            self.inTechMd = True
            self.metadataId = _getAttrValueByQName(attrs, 'ID')
            self.processMetadata = True
        elif elName == 'xmlData' and self.inTechMd:
            self.inXmlData = True
        elif self.xmlDataChildElement is None and self.inXmlData:
            # first child of xmlData names the parameter-set element
            self.xmlDataChildElement = elName
            # let's reset the tempMetadataHolder dictionary for this new batch
            # of datafile metadata
            self.tempMetadataHolder = {}
            self.elementNamespace = name[0]
            # let's check if there's a custom parser that we should use for
            # this metadata block (aka parameter set). if there is, use it!
            # (imported here rather than at module scope — presumably to
            # avoid a circular import; TODO confirm)
            from tardis.tardis_portal.metshandler import customHandlers
            if self.elementNamespace in customHandlers:
                self.customHandler = customHandlers[self.elementNamespace]
                self.customHandler.resetMetadataDict()
        elif self.customHandler is not None:
            # delegate non-flat metadata structures to the custom handler
            self.customHandler.startElement(elName, attrs)
        elif self.parameterName is None and \
                self.xmlDataChildElement is not None:
            # let's save the metadata field name so we can handle its value
            # when we see its closing tag...
            self.parameterName = elName
def endElementNS(self, name, qname):
    """Handle a closing tag during the second (metadata) METS pass.

    Mirrors startElementNS: clears the "grab" flags set there and, on the
    structural closing tags (dmdSec, amdSec, techMD, xmlData and the
    metadata parameter elements), flushes the values collected by
    characters() into the database models.
    """
    # just get the element name without the namespace
    elName = name[1]
    if elName == 'dmdSec':
        self.inDmdSec = False
        # if we currently processing an experiment structure, let's
        # save the institution value before we finalise the experiment
        if self.processExperimentStruct:
            self.metsObject.institution = self.institution
            # let's save the experiment in the DB
            # (reuse an existing experiment row when an id was supplied,
            # otherwise create a fresh one)
            if self.tardisExpId:
                self.modelExperiment = models.Experiment.objects.get(
                    pk=self.tardisExpId)
            else:
                self.modelExperiment = models.Experiment()
            self.modelExperiment.id = self.tardisExpId
            self.modelExperiment.url = self.metsObject.url
            self.modelExperiment.approved = True
            self.modelExperiment.title = self.metsObject.title
            self.modelExperiment.institution_name = \
                self.metsObject.institution
            self.modelExperiment.description = self.metsObject.description
            self.modelExperiment.start_time = self.metsObject.start_time
            self.modelExperiment.end_time = self.metsObject.end_time
            self.modelExperiment.created_by = self.createdBy
            self.modelExperiment.save()
            # expose the saved row's id to the caller via the holder
            self.holder.experimentDatabaseId = self.modelExperiment.id
            # preserve document order of the authors via the order column
            x = 0
            for author in self.metsObject.authors:
                author_experiment = models.Author_Experiment(
                    experiment=self.modelExperiment,
                    author=author, order=x)
                author_experiment.save()
                x = x + 1
        elif self.processDatasetStruct:
            # let's save the dataset in the DB
            self.modelDataset = models.Dataset(
                description=self.metsObject.title,
                immutable=settings.IMMUTABLE_METS_DATASETS)
            self.modelDataset.save()
            self.modelDataset.experiments.add(self.modelExperiment)
            self.modelDataset.save()
            # let's also save the modelDataset in a dictionary so that we
            # can look it up easily later on when we start processing
            # the datafiles.
            self.datasetLookupDict[self.metsObject.id] = self.modelDataset
        # reset the per-dmdSec state regardless of which struct we saved
        self.metsObject = None
        self.processExperimentStruct = False
        self.processDatasetStruct = False
    elif elName == 'title' and self.inDmdSec:
        self.grabTitle = False
    elif elName == 'startTime' and self.processExperimentStruct:
        self.grabStartTime = False
    elif elName == 'endTime' and self.processExperimentStruct:
        self.grabEndTime = False
    elif elName == 'url' and self.processExperimentStruct:
        self.grabExperimentUrl = False
    elif elName == 'abstract' and self.processExperimentStruct:
        self.grabAbstract = False
    elif elName == 'name' and self.processExperimentStruct:
        self.inName = False
    elif elName == 'namePart' and self.inName:
        self.grabMightBeAuthor = False
    elif elName == 'roleTerm' and self.inName:
        self.grabRoleTerm = False
        # the pending name is discarded unless characters() already
        # confirmed it as an author
        self.mightBeAuthor = None
    elif elName == 'name' and self.inInstitution:
        self.grabInstitution = False
    elif elName == 'agent':
        self.inInstitution = False
    elif elName == 'amdSec':
        # we're done processing the metadata entries
        self.inAmdSec = False
        # let's reset the cached experiment model object
        self.modelExperiment = None
    elif elName == 'techMD' and self.inAmdSec:
        self.inTechMd = False
        self.metadataId = None
        self.metsObject = None
        self.processMetadata = False
    elif elName == 'xmlData' and self.inTechMd:
        self.inXmlData = False
    elif elName != self.xmlDataChildElement and \
            self.customHandler is not None:
        # forward nested metadata elements to the schema-specific handler
        self.customHandler.endElement(elName)
    elif elName == self.xmlDataChildElement and self.inXmlData:
        # closing the metadata block: persist the collected parameters
        if self.customHandler is not None:
            self.tempMetadataHolder = self.customHandler.metadataDict
        try:
            schema = models.Schema.objects.get(
                namespace__exact=self.elementNamespace)
            # get the associated parameter names for the given schema
            parameterNames = \
                models.ParameterName.objects.filter(
                    schema__namespace__exact=schema.namespace).order_by('id')
            # let's create a trigger holder which we can use to check
            # if we still need to create another parameterset entry in the
            # DB
            createParamSetFlag = {'experiment': True, 'dataset': True,
                                  'datafile': True}
            datasetParameterSet = None
            datafileParameterSet = None
            if self.metadataId in self.holder.metadataMap:
                for metsObject in self.holder.metadataMap[self.metadataId]:
                    self.metsObject = metsObject
                    metsObjectClassName = self.metsObject.__class__.__name__
                    if metsObjectClassName == 'Experiment':
                        if createParamSetFlag['experiment']:
                            # create a new parameter set for the metadata
                            parameterSet = \
                                models.ExperimentParameterSet(
                                    schema=schema,
                                    experiment=self.modelExperiment)
                            parameterSet.save()
                            # now let's process the experiment parameters
                            for parameterName in parameterNames:
                                if parameterName.name in \
                                        self.tempMetadataHolder:
                                    parameterValues = self.tempMetadataHolder[
                                        parameterName.name]
                                    self._saveParameters('ExperimentParameter',
                                                         parameterName,
                                                         parameterValues,
                                                         parameterSet)
                            createParamSetFlag['experiment'] = False
                        else:
                            # this is not even allowed as there's only going
                            # to be one experiment per METS file
                            raise Exception('forbidden state!')
                    elif metsObjectClassName == 'Dataset':
                        if createParamSetFlag['dataset']:
                            dataset = self.datasetLookupDict[
                                self.metsObject.id]
                            # create a new parameter set for the
                            # dataset metadata
                            datasetParameterSet = \
                                models.DatasetParameterSet(schema=schema,
                                                           dataset=dataset)
                            datasetParameterSet.save()
                            # now let's process the dataset parameters
                            for parameterName in parameterNames:
                                if parameterName.name in \
                                        self.tempMetadataHolder:
                                    parameterValues = self.tempMetadataHolder[
                                        parameterName.name]
                                    self._saveParameters('DatasetParameter',
                                                         parameterName,
                                                         parameterValues,
                                                         datasetParameterSet)
                            # disable creation for the next visit
                            createParamSetFlag['dataset'] = False
                    elif metsObjectClassName == 'Datafile':
                        # this will be a good time to save the
                        # "hard" metadata of this datafile so that
                        # when we start adding "soft" metadata
                        # parameters to it, we already have an
                        # entry for it in the DB
                        # look up the dataset this file belongs to
                        thisFilesDataset = self.datasetLookupDict[
                            self.metsObject.dataset.id]
                        # also check if the file already exists
                        datafile = thisFilesDataset.dataset_file_set.filter(
                            filename=self.metsObject.name,
                            size=self.metsObject.size)
                        if datafile.count() == 0:
                            size = self.metsObject.size
                            if not self.metsObject.size:
                                size = 0

                            def checksum(obj, type_):
                                # Return obj's checksum only if it is a
                                # plausible digest of the requested type,
                                # otherwise the empty string.
                                # Check if the checksum is of type
                                if obj.checksumType != type_:
                                    return ''
                                checksum = obj.checksum.lower()
                                # Ensure the checksum is hexdecimal
                                if not re.match('[0-9a-f]+$', checksum):
                                    return ''
                                # Get algorithm
                                try:
                                    name = type_.replace('-','').lower()
                                    alg = getattr(hashlib, name)
                                # NOTE(review): bare except deliberately treats
                                # any lookup failure as "no usable checksum"
                                except:
                                    return ''
                                # Check checksum is the correct length
                                # NOTE(review): alg('') assumes Python 2 str;
                                # Python 3 hashlib constructors require bytes
                                # -- confirm target interpreter
                                hex_length = alg('').digest_size * 2
                                if hex_length != len(checksum):
                                    return ''
                                # Should be valid checksum of given type
                                return checksum
                            sync_url, proto = get_sync_url_and_protocol(
                                self.syncRootDir,
                                self.metsObject.url)
                            self.modelDatafile = models.Dataset_File(
                                dataset=thisFilesDataset,
                                filename=self.metsObject.name,
                                url=sync_url,
                                size=size,
                                md5sum=checksum(self.metsObject, 'MD5'),
                                sha512sum=checksum(self.metsObject,
                                                   'SHA-512'),
                                protocol=proto)
                            self.modelDatafile.save()
                        else:
                            self.modelDatafile = \
                                thisFilesDataset.dataset_file_set.get(
                                    filename=self.metsObject.name,
                                    size=self.metsObject.size)
                        # TODO: we need to note here that we are
                        # only creating a datafile entry in the DB
                        # for files that have corresponding
                        # metadata. if we are to create a file
                        # entry for files with no metadata, we'll
                        # need to get the unaccessed datafiles
                        # from datasetLookupDict.
                        if createParamSetFlag['datafile']:
                            # create a new parameter set for the metadata
                            datafileParameterSet = \
                                models.DatafileParameterSet(schema=schema,
                                    dataset_file=self.modelDatafile)
                            datafileParameterSet.save()
                            # now let's process the datafile parameters
                            for parameterName in parameterNames:
                                if parameterName.name in \
                                        self.tempMetadataHolder:
                                    parameterValues = self.tempMetadataHolder[
                                        parameterName.name]
                                    self._saveParameters('DatafileParameter',
                                                         parameterName,
                                                         parameterValues,
                                                         datafileParameterSet)
                            createParamSetFlag['datafile'] = False
        except models.Schema.DoesNotExist:
            # unknown namespace: skip this metadata block but keep parsing
            logger.warning('unsupported schema being ingested ' +
                self.elementNamespace)
        # reset the current xmlData child element so that if a new
        # parameter set is read, we can process it again
        self.xmlDataChildElement = None
        self.customHandler = None
    elif elName == self.parameterName and \
            self.xmlDataChildElement is not None:
        # reset self.parameterName to None so the next parameter can be
        # processed
        self.parameterName = None
def _saveParameters(self, parameterTypeClass, parameterName,
parameterValues, parameterSet):
'''Save the metadata field in the database.
Reference:
http://stackoverflow.com/questions/452969/does-python-have-an-equivalent-to-java-class-forname
'''
#logger.debug('saving parameter %s: %s' %
# (parameterName, parameterValue))
for parameterValue in parameterValues:
if parameterValue == '':
continue
if parameterName.isNumeric():
parameter = \
getattr(models, parameterTypeClass)(
parameterset=parameterSet,
name=parameterName,
string_value=None,
numerical_value=float(parameterValue))
else:
parameter = \
getattr(models, parameterTypeClass)(
parameterset=parameterSet,
name=parameterName,
string_value=parameterValue,
numerical_value=None)
parameter.save()
def characters(self, chars):
    """Route SAX character data into whichever field the current flag
    state selects.

    The branch order is significant: the experiment branch wins over the
    institution/dataset/custom-handler/metadata branches, and within the
    experiment branch the first matching grab* flag consumes the text.
    """
    if self.processExperimentStruct:
        # handle the different experiment fields
        if self.grabTitle:
            self.metsObject.title = chars
        elif self.grabExperimentUrl:
            self.metsObject.url = chars
        elif self.grabAbstract:
            # abstracts may arrive in several character chunks, so append
            self.metsObject.description += chars
        elif self.grabMightBeAuthor:
            self.mightBeAuthor = chars
        elif self.grabStartTime:
            self.metsObject.start_time = chars
        elif self.grabEndTime:
            self.metsObject.end_time = chars
        # if it's really an author, add the mightBeAuthor into the
        # experiment's author list
        elif self.grabRoleTerm and chars == 'author':
            self.metsObject.authors.append(self.mightBeAuthor)
    # NOTE(review): institution capture sits outside the experiment branch
    # (agent names appear in the METS header) -- confirm against the flag
    # lifecycle in startElementNS
    elif self.grabInstitution:
        self.institution = chars
    elif self.processDatasetStruct:
        if self.grabTitle:
            self.metsObject.title = chars
    elif self.customHandler is not None:
        # schema-specific handlers consume their own character data
        self.customHandler.characters(chars)
    elif chars.strip() != '' and self.parameterName is not None and \
            self.processMetadata:
        # save the parameter values in the temporary metadata dictionary
        store_metadata_value(self.tempMetadataHolder,
                             self.parameterName, chars)
def _getAttrValue(attrs, attrName):
try:
return attrs.getValue(attrName)
except KeyError:
return None
def _getAttrValueByQName(attrs, attrName):
try:
return attrs.getValueByQName(attrName)
except KeyError:
return None
def parseMets(filename, createdBy, expId=None):
    """Run the two-pass SAX parse of a METS document.

    The first pass builds the experiment/dataset/datafile structure; the
    second pass ties the metadata blocks to those objects and writes them
    to the database.

    Arguments:
    filename -- path of the document to parse (METS or notMETS)
    createdBy -- a User instance
    expId -- the experiment ID to use, if any

    Returns:
    A (experiment database id, sync root directory) tuple.
    """
    import time
    started = time.time()
    logger.debug('parse experiment id: ' + str(expId))

    sax_parser = make_parser(["drv_libxml2"])
    sax_parser.setFeature(feature_namespaces, 1)
    holder = MetsDataHolder()

    # pass 1: build only the experiment's structure
    sax_parser.setContentHandler(MetsExperimentStructCreator(holder))
    sax_parser.parse(filename)

    # Get the destination directory for the experiment's files
    if expId:
        sync_root = get_sync_root(prefix="%d-" % expId)
    else:
        sync_root = get_sync_root()

    # pass 2: tie the metadata info to the experiment/dataset/datafile
    # objects built in pass 1
    sax_parser.setContentHandler(
        MetsMetadataInfoHandler(holder, expId, createdBy, sync_root))
    sax_parser.parse(filename)

    # time difference in seconds
    logger.debug('time difference in seconds: %s' % (time.time() - started))
    return (holder.experimentDatabaseId, sync_root)
| |
import unittest
from compiler import ast, error, lex, parse
# pylint: disable=no-member
class TestModuleAPI(unittest.TestCase):
    """Check that the module-level parse helpers mirror Parser instances."""

    def test_parse(self):
        # parse.parse must match Parser().parse for the same arguments
        default_parser = parse.Parser()
        parse.parse("").should.equal(default_parser.parse(""))

        logger_mock = error.LoggerMock()
        logging_parser = parse.Parser(logger=logger_mock)
        parse.parse("", logger=logger_mock).should.equal(
            logging_parser.parse("")
        )

        type_parser = parse.Parser(start="type")
        parse.parse("int", start="type").should.equal(type_parser.parse("int"))

    def test_quiet_parse(self):
        # quiet_parse must match a Parser built with a mock logger
        logger_mock = error.LoggerMock()
        quiet_parser = parse.Parser(logger=logger_mock)
        parse.quiet_parse("").should.equal(quiet_parser.parse(""))

        type_parser = parse.Parser(start='type')
        parse.quiet_parse("int", start='type').should.equal(
            type_parser.parse("int")
        )
class TestParserAPI(unittest.TestCase):
    """Test the API of the Parser class."""

    def test_init(self):
        # a fully-configured Parser must retain the logger it was given
        logger_mock = error.LoggerMock()
        parser = parse.Parser(
            debug=True,
            logger=logger_mock,
            optimize=True,
            start="type",
            verbose=True
        )
        parser.should.have.property("logger").being.equal(logger_mock)
class TestParserRules(unittest.TestCase):
"""Test the Parser's coverage of Llama grammar."""
@classmethod
def setUpClass(cls):
cls.one = parse.quiet_parse("1", "expr")
cls.two = parse.quiet_parse("2", "expr")
cls.true = parse.quiet_parse("true", "expr")
cls.false = parse.quiet_parse("false", "expr")
cls.unit = parse.quiet_parse("()", "expr")
cls.xfunc = parse.quiet_parse("let x = 1", "letdef")
cls.yfunc = parse.quiet_parse("let y = 2", "letdef")
def _assert_parse_fails(self, expr, start="expr"):
"""
Assert that attempting to parse the expression from the given
start will fail.
"""
p = parse.Parser(logger=error.LoggerMock(), start=start)
p.parse(expr)
p.logger.success.should.be.false # pylint: disable=pointless-statement
def test_empty_program(self):
parse.quiet_parse("").should.equal(ast.Program([]))
def test_def_list(self):
parse.quiet_parse("", "def_list").should.equal([])
parse.quiet_parse("let x = 1", "def_list").should.equal([self.xfunc])
parse.quiet_parse("let x = 1 let y = 2", "def_list").should.equal(
[self.xfunc, self.yfunc]
)
def test_letdef(self):
parse.quiet_parse("let x = 1", "letdef").should.equal(
ast.LetDef(
[ast.FunctionDef("x", [], self.one)]
)
)
parse.quiet_parse("let rec x = 1", "letdef").should.equal(
ast.LetDef(
[ast.FunctionDef("x", [], self.one)], True
)
)
def test_function_def(self):
parse.quiet_parse("let x = 1", "def").should.equal(
ast.FunctionDef("x", [], self.one)
)
parse.quiet_parse("let x y (z:int) = 1", "def").should.equal(
ast.FunctionDef(
"x",
[ast.Param("y"), ast.Param("z", ast.Int())],
self.one
)
)
parse.quiet_parse("let x y z:int = 1", "def").should.equal(
ast.FunctionDef(
"x",
[ast.Param("y"), ast.Param("z")], self.one, ast.Int()
)
)
def test_param_list(self):
parse.quiet_parse("", "param_list").should.equal([])
parse.quiet_parse("my_param", "param_list").should.equal(
[ast.Param("my_param")]
)
parse.quiet_parse("a b", "param_list").should.equal(
[ast.Param("a"), ast.Param("b")]
)
def test_param(self):
parse.quiet_parse("my_parameter", "param").should.equal(
ast.Param("my_parameter")
)
parse.quiet_parse("(my_parameter: int)", "param").should.equal(
ast.Param("my_parameter", ast.Int())
)
self._assert_parse_fails("my_parameter: int", "param")
def test_builtin_type(self):
for name, typecon in ast.builtin_types_map.items():
parse.quiet_parse(name, "type").should.equal(typecon())
def test_star_comma_seq(self):
parse.quiet_parse("*", "star_comma_seq").should.equal(1)
parse.quiet_parse("*, *, *", "star_comma_seq").should.equal(3)
def test_array_type(self):
array_node = ast.Array(ast.Int())
parse.quiet_parse("array of int", "type").should.equal(array_node)
parse.quiet_parse("array [*, *] of int", "type").should.equal(
ast.Array(ast.Int(), 2)
)
def test_function_type(self):
func_node = ast.Function(ast.Int(), ast.Float())
parse.quiet_parse("int -> float", "type").should.equal(func_node)
def test_ref_type(self):
ref_node = ast.Ref(ast.Int())
parse.quiet_parse("int ref", "type").should.equal(ref_node)
def test_user_type(self):
user_node = ast.User("mytype")
parse.quiet_parse("mytype", "type").should.equal(user_node)
def test_type_paren(self):
parse.quiet_parse("(int)", "type").should.equal(ast.Int())
def test_const(self):
parse.quiet_parse("5", "expr").should.equal(
ast.ConstExpression(ast.Int(), 5)
)
parse.quiet_parse("5.7", "expr").should.equal(
ast.ConstExpression(ast.Float(), 5.7)
)
parse.quiet_parse("'z'", "expr").should.equal(
ast.ConstExpression(ast.Char(), "z")
)
parse.quiet_parse('"z"', "expr").should.equal(
ast.ConstExpression(ast.String(), ["z", '\0'])
)
parse.quiet_parse("true", "expr").should.equal(
ast.ConstExpression(ast.Bool(), True)
)
parse.quiet_parse("()", "expr").should.equal(
ast.ConstExpression(ast.Unit(), None)
)
def test_constr(self):
parse.quiet_parse("Node", "constr").should.equal(
ast.Constructor("Node", [])
)
parse.quiet_parse("Node of int", "constr").should.equal(
ast.Constructor("Node", [ast.Int()])
)
def test_simple_variable_def(self):
foo_var = ast.VariableDef("foo")
parse.quiet_parse("mutable foo : int", "def").should.equal(
ast.VariableDef("foo", ast.Ref(ast.Int()))
)
parse.quiet_parse("mutable foo", "def").should.equal(foo_var)
def test_array_variable_def(self):
array_var = ast.ArrayVariableDef("foo", [self.two])
parse.quiet_parse("mutable foo [2]", "def").should.equal(array_var)
parse.quiet_parse("mutable foo [2] : int", "def").should.equal(
ast.ArrayVariableDef("foo", [self.two], ast.Array(ast.Int()))
)
def test_while_expr(self):
parse.quiet_parse("while true do () done", "expr").should.equal(
ast.WhileExpression(self.true, self.unit)
)
def test_if_expr(self):
parse.quiet_parse("if true then 1 else 2", "expr").should.equal(
ast.IfExpression(self.true, self.one, self.two)
)
parse.quiet_parse("if true then ()", "expr").should.equal(
ast.IfExpression(self.true, self.unit)
)
def test_for_expr(self):
parse.quiet_parse("for i = 1 to 2 do () done", "expr").should.equal(
ast.ForExpression(
"i", self.one, self.two, self.unit
)
)
parse.quiet_parse(
"for i = 2 downto 1 do () done",
"expr"
).should.equal(
ast.ForExpression(
"i", self.two, self.one, self.unit, True
)
)
def test_pattern(self):
parse.quiet_parse("true", "pattern").should.equal(self.true)
parse.quiet_parse("Red true", "pattern").should.equal(
ast.Pattern("Red", [self.true])
)
parse.quiet_parse("(true)", "pattern").should.equal(self.true)
parse.quiet_parse("foo", "pattern").should.equal(
ast.GenidPattern("foo")
)
parse.quiet_parse("true", "pattern").should.equal(self.true)
parse.quiet_parse("false", "pattern").should.equal(self.false)
parse.quiet_parse("'c'", "pattern").should.equal(
ast.ConstExpression(ast.Char(), "c")
)
parse.quiet_parse("42.0", "pattern").should.equal(
ast.ConstExpression(ast.Float(), 42.0)
)
parse.quiet_parse("+.42.0", "pattern").should.equal(
ast.ConstExpression(ast.Float(), 42.0)
)
parse.quiet_parse("-.42.0", "pattern").should.equal(
ast.ConstExpression(ast.Float(), -42.0)
)
parse.quiet_parse("42", "pattern").should.equal(
ast.ConstExpression(ast.Int(), 42)
)
parse.quiet_parse("+42", "pattern").should.equal(
ast.ConstExpression(ast.Int(), 42)
)
parse.quiet_parse("-42", "pattern").should.equal(
ast.ConstExpression(ast.Int(), -42)
)
def test_simple_pattern_seq(self):
self._assert_parse_fails("", "simple_pattern_seq")
red, blue = ast.Pattern("Red"), ast.Pattern("Blue")
parse.quiet_parse("Red", "simple_pattern_seq").should.equal([red])
parse.quiet_parse("Red Blue", "simple_pattern_seq").should.equal(
[red, blue]
)
def test_match_expr(self):
parse.quiet_parse(
"match true with false -> 1 end", "expr"
).should.equal(
ast.MatchExpression(self.true, [ast.Clause(self.false, self.one)])
)
def test_clause(self):
parse.quiet_parse("true -> false", "clause").should.equal(
ast.Clause(self.true, self.false)
)
def test_clause_seq(self):
self._assert_parse_fails("", "clause_seq")
clause1 = ast.Clause(self.one, self.two)
clause2 = ast.Clause(self.true, self.false)
parse.quiet_parse(
"1 -> 2 | true -> false", "clause_seq"
).should.equal(
[clause1, clause2]
)
def test_delete(self):
parse.quiet_parse("delete p", "expr").should.equal(
ast.DeleteExpression(
ast.GenidExpression("p")
)
)
def _check_binary_operator(self, operator):
expr = "1 %s 2" % operator
parsed = parse.quiet_parse(expr, "expr")
parsed.should.be.an(ast.BinaryExpression)
parsed.operator.should.equal(operator)
parsed.leftOperand.should.equal(self.one)
parsed.rightOperand.should.equal(self.two)
def _check_unary_operator(self, operator):
expr = "%s 1" % operator
parsed = parse.quiet_parse(expr, "expr")
parsed.should.be.an(ast.UnaryExpression)
parsed.operator.should.equal(operator)
parsed.operand.should.equal(self.one)
def test_binary_expr(self):
for operator in list(lex.binary_operators.keys()) + ["mod"]:
self._check_binary_operator(operator)
def test_unary_expr(self):
for operator in list(lex.unary_operators.keys()) + ["not"]:
self._check_unary_operator(operator)
def test_begin_end_expr(self):
parse.quiet_parse("begin 1 end", "expr").should.equal(self.one)
def test_function_call_expr(self):
parse.quiet_parse("f 1", "expr").should.equal(
ast.FunctionCallExpression("f", [self.one])
)
def test_constructor_call_expr(self):
parse.quiet_parse("Red 1", "expr").should.equal(
ast.ConstructorCallExpression("Red", [self.one])
)
def test_simple_expr_seq(self):
self._assert_parse_fails("", "simple_expr_seq")
parse.quiet_parse("1", "simple_expr_seq").should.equal([self.one])
parse.quiet_parse("1 2", "simple_expr_seq").should.equal(
[self.one, self.two]
)
def test_dim_expr(self):
parsed = parse.quiet_parse("dim name", "expr")
parsed.should.be.an(ast.DimExpression)
parsed.name.should.equal("name")
parsed = parse.quiet_parse("dim 2 name", "expr")
parsed.should.be.an(ast.DimExpression)
parsed.name.should.equal("name")
parsed.dimension.should.equal(2)
def test_in_expr(self):
in_expr = ast.LetInExpression(self.xfunc, self.one)
parse.quiet_parse("let x = 1 in 1", "expr").should.equal(in_expr)
def test_new(self):
parse.quiet_parse("new int", "expr").should.equal(
ast.NewExpression(ast.Int())
)
def test_expr_comma_seq(self):
self._assert_parse_fails("", "expr_comma_seq")
parse.quiet_parse("1", "expr_comma_seq").should.equal([self.one])
parse.quiet_parse("1, 2", "expr_comma_seq").should.equal(
[self.one, self.two]
)
def test_array_expr(self):
parse.quiet_parse("a[1]", "expr").should.equal(
ast.ArrayExpression("a", [self.one])
)
def test_paren_expr(self):
parse.quiet_parse("(1)", "expr").should.equal(self.one)
def test_conid_expr(self):
parse.quiet_parse("Red", "expr").should.equal(
ast.ConidExpression("Red")
)
def test_genid_expr(self):
parse.quiet_parse("f", "expr").should.equal(ast.GenidExpression("f"))
def test_constr_pipe_seq(self):
self._assert_parse_fails("", "constr_pipe_seq")
parse.quiet_parse("Black | White", "constr_pipe_seq").should.equal(
[ast.Constructor("Black"),
ast.Constructor("White")]
)
def test_tdef(self):
parse.quiet_parse("color = Red", "tdef").should.equal(
ast.TDef(ast.User("color"), [ast.Constructor("Red")])
)
parse.quiet_parse("int = Red", "tdef").should.equal(
ast.TDef(ast.Int(), [ast.Constructor("Red")])
)
def test_tdef_and_seq(self):
self._assert_parse_fails("", "tdef_and_seq")
parse.quiet_parse(
"color = Red and shoes = Slacks", "tdef_and_seq"
).should.equal(
[
ast.TDef(ast.User("color"), [ast.Constructor("Red")]),
ast.TDef(ast.User("shoes"), [ast.Constructor("Slacks")])
]
)
def test_typedef(self):
parse.quiet_parse("type color = Red", "typedef").should.equal(
[ast.TDef(ast.User("color"), [ast.Constructor("Red")])]
)
def test_type_seq(self):
self._assert_parse_fails("", "type_seq")
parse.quiet_parse("int float", "type_seq").should.equal(
[ast.Int(), ast.Float()]
)
def _assert_equivalent(self, expr1, expr2=None, start="expr"):
"""
Assert that two expressions are parsed into equivalent ASTs.
You can pass either two expressions (expr1, expr2) or a sequence
of expression tuples as expr1, leaving expr2 to None.
"""
if expr2 is None:
# sequence of expressions
exprs = expr1
for expr1, expr2 in exprs:
self._assert_equivalent(expr1, expr2, start)
else:
# self.assertEqual(
# parse.quiet_parse(expr1, "expr"),
# parse.quiet_parse(expr2, "expr"),
# "'%s' must equal '%s'" % (expr1, expr2)
# )
parsed1 = parse.quiet_parse(expr1, start)
parsed2 = parse.quiet_parse(expr2, start)
parsed1.should.equal(parsed2)
def _assert_non_equivalent(self, expr1, expr2=None, start="expr"):
"""
Assert that two expressions are not parsed as equivalent ASTs.
The API is similar to _assert_equivalent.
"""
if expr2 is None:
# sequence of expressions
exprs = expr1
for expr1, expr2 in exprs:
self._assert_non_equivalent(expr1, expr2, start)
else:
parsed1 = parse.quiet_parse(expr1, start)
parsed2 = parse.quiet_parse(expr2, start)
parsed1.shouldnt.equal(parsed2)
def test_precedence_new_bang(self):
self._assert_equivalent("!new int", "!(new int)")
def test_precedence_arrayexpr_bang(self):
self._assert_equivalent("!a[0]", "!(a[0])")
def test_precedence_bang_juxtaposition(self):
self._assert_equivalent((
("!f x", "(!f) x"),
("!F x", "(!F) x")
))
def test_precedence_juxtaposition_sign(self):
self._assert_equivalent((
("+ f x", "+ (f x)"),
("+ F x", "+ (F x)"),
("- f x", "- (f x)"),
("- F x", "- (F x)"),
("+. f x", "+. (f x)"),
("+. F x", "+. (F x)"),
("-. f x", "-. (f x)"),
("-. F x", "-. (F x)"),
("not f x", "not (f x)"),
("not F x", "not (F x)"),
("delete f x", "delete (f x)"),
("delete F x", "delete (F x)")
))
def test_precedence_sign_pow(self):
self._assert_equivalent((
("+1 ** 2", "(+1) ** 2"),
("1 ** +2", "1 ** (+2)"),
("-1 ** 2", "(-1) ** 2"),
("1 ** -2", "1 ** (-2)"),
("+.1 ** 2", "(+.1) ** 2"),
("1 ** +.2", "1 ** (+.2)"),
("-.1 ** 2", "(-.1) ** 2"),
("1 ** -.2", "1 ** (-.2)"),
("not true ** 2", "(not true) ** 2"),
("1 ** not false", "1 ** (not false)"),
("delete p ** 2", "(delete p) ** 2"),
("1 ** delete p", "1 ** (delete p)"),
))
def test_precedence_pow_multiplicative(self):
self._assert_equivalent((
("1 ** 2 * 3", "(1 ** 2) * 3"),
("1 * 2 ** 3", "1 * (2 ** 3)"),
("1 ** 2 / 3", "(1 ** 2) / 3"),
("1 / 2 ** 3", "1 / (2 ** 3)"),
("1 ** 2 *. 3", "(1 ** 2) *. 3"),
("1 *. 2 ** 3", "1 *. (2 ** 3)"),
("1 ** 2 /. 3", "(1 ** 2) /. 3"),
("1 /. 2 ** 3", "1 /. (2 ** 3)"),
("1 ** 2 mod 3", "(1 ** 2) mod 3"),
("1 mod 2 ** 3", "1 mod (2 ** 3)"),
))
def test_precedence_multiplicative_additive(self):
self._assert_equivalent((
("1 + 2 * 3", "1 + (2 * 3)"),
("1 * 2 + 3", "(1 * 2) + 3"),
("1 + 2 / 3", "1 + (2 / 3)"),
("1 / 2 + 3", "(1 / 2) + 3"),
("1 + 2 *. 3", "1 + (2 *. 3)"),
("1 *. 2 + 3", "(1 *. 2) + 3"),
("1 + 2 /. 3", "1 + (2 /. 3)"),
("1 /. 2 + 3", "(1 /. 2) + 3"),
("1 + 2 mod 3", "1 + (2 mod 3)"),
("1 mod 2 + 3", "(1 mod 2) + 3"),
("1 - 2 * 3", "1 - (2 * 3)"),
("1 * 2 - 3", "(1 * 2) - 3"),
("1 - 2 / 3", "1 - (2 / 3)"),
("1 / 2 - 3", "(1 / 2) - 3"),
("1 - 2 *. 3", "1 - (2 *. 3)"),
("1 *. 2 - 3", "(1 *. 2) - 3"),
("1 - 2 /. 3", "1 - (2 /. 3)"),
("1 /. 2 - 3", "(1 /. 2) - 3"),
("1 - 2 mod 3", "1 - (2 mod 3)"),
("1 mod 2 - 3", "(1 mod 2) - 3"),
("1 +. 2 * 3", "1 +. (2 * 3)"),
("1 * 2 +. 3", "(1 * 2) +. 3"),
("1 +. 2 / 3", "1 +. (2 / 3)"),
("1 / 2 +. 3", "(1 / 2) +. 3"),
("1 +. 2 *. 3", "1 +. (2 *. 3)"),
("1 *. 2 +. 3", "(1 *. 2) +. 3"),
("1 +. 2 /. 3", "1 +. (2 /. 3)"),
("1 /. 2 +. 3", "(1 /. 2) +. 3"),
("1 +. 2 mod 3", "1 +. (2 mod 3)"),
("1 mod 2 +. 3", "(1 mod 2) +. 3"),
("1 -. 2 * 3", "1 -. (2 * 3)"),
("1 * 2 -. 3", "(1 * 2) -. 3"),
("1 -. 2 / 3", "1 -. (2 / 3)"),
("1 / 2 -. 3", "(1 / 2) -. 3"),
("1 -. 2 *. 3", "1 -. (2 *. 3)"),
("1 *. 2 -. 3", "(1 *. 2) -. 3"),
("1 -. 2 /. 3", "1 -. (2 /. 3)"),
("1 /. 2 -. 3", "(1 /. 2) -. 3"),
("1 -. 2 mod 3", "1 -. (2 mod 3)"),
("1 mod 2 -. 3", "(1 mod 2) -. 3")
))
def test_precedence_additive_commparison(self):
self._assert_equivalent((
("a + b = c", "(a + b) = c"),
("a = b + c", "a = (b + c)"),
("a - b = c", "(a - b) = c"),
("a = b - c", "a = (b - c)"),
("a +. b = c", "(a +. b) = c"),
("a = b +. c", "a = (b +. c)"),
("a -. b = c", "(a -. b) = c"),
("a = b -. c", "a = (b -. c)"),
("a + b <> c", "(a + b) <> c"),
("a <> b + c", "a <> (b + c)"),
("a - b <> c", "(a - b) <> c"),
("a <> b - c", "a <> (b - c)"),
("a +. b <> c", "(a +. b) <> c"),
("a <> b +. c", "a <> (b +. c)"),
("a -. b <> c", "(a -. b) <> c"),
("a <> b -. c", "a <> (b -. c)"),
("a + b > c", "(a + b) > c"),
("a > b + c", "a > (b + c)"),
("a - b > c", "(a - b) > c"),
("a > b - c", "a > (b - c)"),
("a +. b > c", "(a +. b) > c"),
("a > b +. c", "a > (b +. c)"),
("a -. b > c", "(a -. b) > c"),
("a > b -. c", "a > (b -. c)"),
("a + b < c", "(a + b) < c"),
("a < b + c", "a < (b + c)"),
("a - b < c", "(a - b) < c"),
("a < b - c", "a < (b - c)"),
("a +. b < c", "(a +. b) < c"),
("a < b +. c", "a < (b +. c)"),
("a -. b < c", "(a -. b) < c"),
("a < b -. c", "a < (b -. c)"),
("a + b <= c", "(a + b) <= c"),
("a <= b + c", "a <= (b + c)"),
("a - b <= c", "(a - b) <= c"),
("a <= b - c", "a <= (b - c)"),
("a +. b <= c", "(a +. b) <= c"),
("a <= b +. c", "a <= (b +. c)"),
("a -. b <= c", "(a -. b) <= c"),
("a <= b -. c", "a <= (b -. c)"),
("a + b >= c", "(a + b) >= c"),
("a >= b + c", "a >= (b + c)"),
("a - b >= c", "(a - b) >= c"),
("a >= b - c", "a >= (b - c)"),
("a +. b >= c", "(a +. b) >= c"),
("a >= b +. c", "a >= (b +. c)"),
("a -. b >= c", "(a -. b) >= c"),
("a >= b -. c", "a >= (b -. c)"),
("a + b == c", "(a + b) == c"),
("a == b + c", "a == (b + c)"),
("a - b == c", "(a - b) == c"),
("a == b - c", "a == (b - c)"),
("a +. b == c", "(a +. b) == c"),
("a == b +. c", "a == (b +. c)"),
("a -. b == c", "(a -. b) == c"),
("a == b -. c", "a == (b -. c)"),
("a + b != c", "(a + b) != c"),
("a != b + c", "a != (b + c)"),
("a - b != c", "(a - b) != c"),
("a != b - c", "a != (b - c)"),
("a +. b != c", "(a +. b) != c"),
("a != b +. c", "a != (b +. c)"),
("a -. b != c", "(a -. b) != c"),
("a != b -. c", "a != (b -. c)"),
))
def test_precedence_comparison_band(self):
self._assert_equivalent((
("a && b = c", "a && (b = c)"),
("a = b && c", "(a = b) && c"),
("a && b <> c", "a && (b <> c)"),
("a <> b && c", "(a <> b) && c"),
("a && b > c", "a && (b > c)"),
("a > b && c", "(a > b) && c"),
("a && b < c", "a && (b < c)"),
("a < b && c", "(a < b) && c"),
("a && b <= c", "a && (b <= c)"),
("a <= b && c", "(a <= b) && c"),
("a && b >= c", "a && (b >= c)"),
("a >= b && c", "(a >= b) && c"),
("a && b == c", "a && (b == c)"),
("a == b && c", "(a == b) && c"),
("a && b != c", "a && (b != c)"),
("a != b && c", "(a != b) && c"),
))
def test_precedence_band_bor(self):
self._assert_equivalent((
("a || b && c", "a || (b && c)"),
("a && b || c", "(a && b) || c"),
))
def test_precedence_bor_assign(self):
self._assert_equivalent((
("a := b || c", "a := (b || c)"),
("a || b := c", "(a || b) := c"),
))
def test_precedence_assign_ifthenelse(self):
self._assert_equivalent((
("if p then () else a := b", "if p then () else (a := b)"),
("if p then a := b", "if p then (a := b)"),
))
def test_precedence_ifthenelse_semicolon(self):
self._assert_equivalent((
("if p then 1 else 2; 3", "(if p then 1 else 2); 3"),
("if p then 2; 3", "(if p then 2); 3"),
))
def test_precedence_assign_semicolon(self):
self._assert_equivalent((
("a := b; c", "(a := b); c"),
("a; b := c", "a; (b := c)"),
))
def test_precedence_semicolon_letin(self):
self._assert_equivalent("let x = 0 in y; z", "let x = 0 in (y; z)")
def test_associativity_arrayexpr(self):
self._assert_parse_fails("a[0][0]")
def test_associativity_pow(self):
self._assert_equivalent("1 ** 2 ** 3", "1 ** (2 ** 3)")
def test_associativity_multiplicative(self):
self._assert_equivalent((
("1 * 2 * 3", "(1 * 2) * 3"),
("1 * 2 / 3", "(1 * 2) / 3"),
("1 * 2 *. 3", "(1 * 2) *. 3"),
("1 * 2 /. 3", "(1 * 2) /. 3"),
("1 / 2 * 3", "(1 / 2) * 3"),
("1 / 2 / 3", "(1 / 2) / 3"),
("1 / 2 *. 3", "(1 / 2) *. 3"),
("1 / 2 /. 3", "(1 / 2) /. 3"),
("1 *. 2 * 3", "(1 *. 2) * 3"),
("1 *. 2 / 3", "(1 *. 2) / 3"),
("1 *. 2 *. 3", "(1 *. 2) *. 3"),
("1 *. 2 /. 3", "(1 *. 2) /. 3"),
("1 /. 2 * 3", "(1 /. 2) * 3"),
("1 /. 2 / 3", "(1 /. 2) / 3"),
("1 /. 2 *. 3", "(1 /. 2) *. 3"),
("1 /. 2 /. 3", "(1 /. 2) /. 3"),
("1 mod 2 * 3", "(1 mod 2) * 3"),
("1 mod 2 / 3", "(1 mod 2) / 3"),
("1 mod 2 *. 3", "(1 mod 2) *. 3"),
("1 mod 2 /. 3", "(1 mod 2) /. 3")
))
def test_associativity_additive(self):
self._assert_equivalent((
("1 + 2 + 3", "(1 + 2) + 3"),
("1 + 2 - 3", "(1 + 2) - 3"),
("1 + 2 +. 3", "(1 + 2) +. 3"),
("1 + 2 -. 3", "(1 + 2) -. 3"),
("1 - 2 + 3", "(1 - 2) + 3"),
("1 - 2 - 3", "(1 - 2) - 3"),
("1 - 2 +. 3", "(1 - 2) +. 3"),
("1 - 2 -. 3", "(1 - 2) -. 3"),
("1 +. 2 + 3", "(1 +. 2) + 3"),
("1 +. 2 - 3", "(1 +. 2) - 3"),
("1 +. 2 +. 3", "(1 +. 2) +. 3"),
("1 +. 2 -. 3", "(1 +. 2) -. 3"),
("1 -. 2 + 3", "(1 -. 2) + 3"),
("1 -. 2 - 3", "(1 -. 2) - 3"),
("1 -. 2 +. 3", "(1 -. 2) +. 3"),
("1 -. 2 -. 3", "(1 -. 2) -. 3")
))
def test_associativity_compariosn(self):
    """Comparison operators are non-associative: chaining is a parse error.

    NOTE(review): the method name has a typo ('compariosn'); kept as-is so
    the test id stays stable for existing runs/reports.
    """
    for op in ("=", "<>", ">", "<", "<=", ">=", "==", "!="):
        self._assert_parse_fails("a %s b %s c" % (op, op))
def test_associativity_band(self):
    """'&&' is left-associative."""
    implicit = "a && b && c"
    explicit = "(a && b) && c"
    self._assert_equivalent(implicit, explicit)
def test_associativity_bor(self):
    """'||' is left-associative."""
    implicit = "a || b || c"
    explicit = "(a || b) || c"
    self._assert_equivalent(implicit, explicit)
def test_associativity_assign(self):
    """':=' does not chain: 'a := b := c' must be rejected."""
    bad_input = "a := b := c"
    self._assert_parse_fails(bad_input)
def test_associativity_ifthenelse(self):
    """A dangling 'else' attaches to the nearest 'if'."""
    implicit = "if p then if q then a else b"
    explicit = "if p then (if q then a else b)"
    self._assert_equivalent(implicit, explicit)
def test_associativity_semicolon(self):
    """';' is left-associative."""
    implicit = "x; y; z"
    explicit = "(x; y); z"
    self._assert_equivalent(implicit, explicit)
def test_precedence_non_equiv(self):
    """'f -2' and 'f (-2)' must not produce the same parse."""
    left, right = "f -2", "f (-2)"
    self._assert_non_equivalent(left, right)
def test_precedence_array_ref(self):
    """In type expressions, 'ref' binds tighter than 'array of'."""
    implicit = "array of int ref"
    explicit = "array of (int ref)"
    self._assert_equivalent(implicit, explicit, start="type")
def test_precedence_array_func(self):
    """In type expressions, 'array of' binds tighter than '->'."""
    implicit = "array of int -> int"
    explicit = "(array of int) -> int"
    self._assert_equivalent(implicit, explicit, start="type")
def test_precedence_func_ref(self):
    """In type expressions, 'ref' binds tighter than '->'."""
    implicit = "int -> int ref"
    explicit = "int -> (int ref)"
    self._assert_equivalent(implicit, explicit, start="type")
# NOTE: A test for array-type associativity is deliberately omitted:
# 'array of array' is rejected by the semantic checker, not the parser.
def test_associativity_ref(self):
    """'ref' is left-associative in type expressions."""
    implicit = "int ref ref"
    explicit = "(int ref) ref"
    self._assert_equivalent(implicit, explicit, start="type")
def test_associativity_func(self):
    """'->' is right-associative in type expressions."""
    implicit = "int -> int -> int"
    explicit = "int -> (int -> int)"
    self._assert_equivalent(implicit, explicit, start="type")
| |
#!/usr/bin/env python2
from __future__ import print_function
"""
el
==
Open args from stdin with EDITOR_ or EDITOR
Usage:
.. code:: bash
# edit files from ls
ls | el -e
# edit files from find -print0
find . -depth 1 -print0 | el -0 -e
# print lines from stdin
printf "one\ntwo" | el --each -x echo
printf "one\ntwo" | el
find . -type f -print0 | el -0 --each -x echo
"""
# import __builtin__
import codecs
import distutils.spawn
import logging
import os
import shlex
import subprocess
import sys
# Python 2/3 compatibility shims: normalize the string base type and the
# in-memory text buffer constructors used by the tests and by main().
if sys.version_info.major > 2:
    string_types = str
    unicode = str
    import io
    StringIO = io.StringIO
    Buffer = lambda x=None: io.TextIOWrapper(io.StringIO(x))
else:
    string_types = basestring
    import StringIO
    StringIO = StringIO.StringIO
    Buffer = lambda x=None: StringIO.StringIO(x)

# Module-level logger for this script.
log = logging.getLogger('el')

# Codec used to wrap the standard streams on Python 2 (see main()).
DEFAULT_ENCODING = 'UTF8'

# Process exit codes returned by main().
RET_OK = 0
RET_ERR_EDITOR = 3                 # no EDITOR_/EDITOR configured
RET_ERR_ARGS_EXPECTED = 7          # neither -e nor -x was given
RET_ERR_IN_A_SUBCOMMAND = 22       # a subcommand exited nonzero
def print_help(argv=None, file=None):
    """
    Print help/usage information.

    Keyword Arguments:
        argv (list[str]): argv whose first element is used as the program
            name (default: ``sys.argv`` resolved at call time)
        file (file-like): file to ``print`` to (default: ``sys.stdout``
            resolved at call time)
    """
    # BUGFIX: the defaults were previously bound at definition time
    # (``argv=sys.argv, file=sys.stdout``), which ignores any later
    # reassignment of sys.argv/sys.stdout; resolve them at call time.
    if argv is None:
        argv = sys.argv
    if file is None:
        file = sys.stdout

    def usage_iter():
        yield ("%s [-h] [-q] [-0] [-e] [-x <cmd>]" % argv[0])
        yield ("")
        yield ("  el (Edit Lines): a line-based UNIX utility similar to xargs.")
        yield ("")
        yield ("  --each|--map   ## for arg in args; do cmd.format(arg); done")
        yield ("  -f|--force     ## continue on error")
        yield ("")
        yield ("  -x             ## execute command")
        yield ("  -e             ## execute EDITOR_ or EDITOR (default)")
        yield ("")
        yield ("  -0             ## split by \\0")
        yield ("")
        yield ("  -h             ## print_help() message")
        yield ("  -v             ## verbose (logging.DEBUG)")
        yield ("")
    for line in usage_iter():
        print(line, file=file)
class Cmd(object):
    """
    A shell command as a list that can be rendered with arguments.
    (To avoid using string concatenation for OS commands; and instead
    work with lists of arguments.)
    """

    def __init__(self, cmdlist=None):
        """
        Keyword Arguments:
            cmdlist (list[str]): shell command 'tokens' (default: [])
        """
        self.set_cmdlist(cmdlist)

    def set_cmdlist(self, cmdlist):
        """
        Set self.cmdlist (and self.cmd)

        Args:
            cmdlist (list[str]): shell command 'tokens'
        """
        if cmdlist is None:
            cmdlist = []
        self.cmdlist = cmdlist
        self.cmd = self._process_cmd(self.cmdlist)

    def __str__(self):
        return u'Cmd(%r)' % (self.cmdlist)

    @staticmethod
    def _process_cmd(cmdlist):
        """
        Resolve ``cmdlist[0]`` to a full executable path when it is a bare
        name (contains no '/').

        Returns:
            list[str]: cmdlist, with cmdlist[0] replaced by its full path
                when it was a bare name.

        Raises:
            Exception: if a bare name cannot be found on PATH.
        """
        # BUGFIX: an empty command (e.g. the default ``Cmd()``) previously
        # raised IndexError on ``cmdlist[0]``; pass it through unchanged.
        if not cmdlist:
            return cmdlist
        binname = cmdlist[0]
        if '/' in binname:
            # Already a path: use it as-is.
            return cmdlist
        binpath = distutils.spawn.find_executable(binname)
        if not binpath:
            raise Exception("%r not found" % binname)
        cmd_output = cmdlist[:]
        cmd_output[0] = binpath
        return cmd_output

    @staticmethod
    def _render_cmd(cmd, args, join_args=None, always_append_args=True):
        """
        Render a command like a template; with arguments as the context.

        Insert or append args to the ``cmd`` list where ``{0}`` is found,
        or, at the end, if ``{0}`` is not found
        and ``always_append_args`` is ``True``.

        Arguments:
            cmd (list[str]): a list of commands, optionally containing ``{0}``
                if ``{0}`` occurs in the middle of a term (e.g. is not quoted),
                the arguments will be joined together with ``join_args``
            args (list[str]): zero or more arguments to insert at ``{0}``
                or append, if ``always_append_args`` is ``True``.

        Keyword Arguments:
            join_args (None, str, or callable): function to join arguments by.
                If ``join_args`` is a ``str``, the callable is ``strvalue.join``.
                If ``join_args`` is ``None``, the arguments will not be joined.
            always_append_args (bool): whether to append arguments by default
                when the ``{0}`` pattern is not found

        Returns:
            list[str]: a command to execute (e.g. with ``subprocess.Popen``)
        """
        log.debug('render_cmd(cmd,args): (%r, %r)' % (cmd, args))

        def _render_cmd_iter(cmd, args, join_args=join_args):
            added = False
            if join_args:
                if isinstance(join_args, string_types):
                    join_args = join_args.join
            for token in cmd:
                # todo: support escaped '{{0}}', w/ regex (?)
                if token and '{0}' in token:
                    if token == '{0}':
                        # Bare placeholder: splice the args in verbatim.
                        for x in args:
                            yield x
                        added = True
                    else:
                        # Embedded placeholder: substitute the (joined) args.
                        if join_args:
                            argstr = join_args(args)
                        else:
                            argstr = args
                        _token = token.format(argstr)  # TODO: does this mangle
                        yield _token
                        added = True
                else:
                    yield token
            if not added and always_append_args:
                for x in args:
                    yield x

        cmd = list(_render_cmd_iter(cmd, args, join_args=join_args))
        log.debug('_render_cmd: %r' % cmd)
        return cmd

    def render(self, args, join_args=None, always_append_args=True):
        """
        Call :py:method:`_render_cmd` with ``self.cmd`` and ``args``
        """
        # BUGFIX: always_append_args was previously hardcoded to True here,
        # silently ignoring the caller's keyword argument.
        cmd = self._render_cmd(self.cmd, args,
                               join_args=join_args,
                               always_append_args=always_append_args)
        return cmd

    @staticmethod
    def _call(args, **kwargs):
        """
        Wraps subprocess.call

        Arguments:
            args (list[str]): args for subprocess.call
                (subprocess.Popen.__init__:args[0])

        Returns:
            int: OS retcode from ``subprocess.call``
        """
        log.debug('subprocess.call(args,kwargs): (%r, %r)' % (args, kwargs))
        return subprocess.call(args, **kwargs)

    def run(self, args, **kwargs):
        """
        Preprocess args, render, and run the command with the given arguments

        Arguments:
            args (list[str]): arguments with which to render self.cmdlist
                (e.g. as args for subprocess.call (subprocess.Popen)).

        Returns:
            int: OS retcode from ``subprocess.call``
        """
        join_args = kwargs.pop('join_args', None)
        # Subclasses may define preprocess_args() (e.g. OpenEditorCmd).
        if hasattr(self, 'preprocess_args'):
            args = self.preprocess_args(args)
            log.debug("cmd.preprocess_args [%s]: %r" % (
                self.__class__.__name__, args))
        cmd = self.render(args, join_args=join_args)
        log.info("cmd.run: %s" % cmd)
        return self._call(cmd, **kwargs)
class OpenEditorCmd(Cmd):
    """A Cmd that opens its arguments with ``EDITOR_`` or ``EDITOR``."""

    def __init__(self, *args, **kwargs):
        # Positional/keyword args are accepted for interface compatibility
        # but ignored; the command list always comes from the environment.
        self.set_cmdlist(self.get_editor_cmdlist())

    @staticmethod
    def get_editor_cmdlist():
        """
        Return the editor command as a shlex-split token list.

        Returns:
            list[str]: tokens of ``EDITOR_`` (preferred) or ``EDITOR``.

        Raises:
            ValueError: if neither environment variable is set.
        """
        env = os.environ
        EDITOR = env.get('EDITOR')
        EDITOR_ = env.get('EDITOR_')
        log.debug("EDITOR=%r" % EDITOR)
        log.debug("EDITOR_=%r" % EDITOR_)
        editorstr = EDITOR_ or EDITOR
        if editorstr is None:
            # BUGFIX: this previously returned the int RET_ERR_EDITOR, which
            # set_cmdlist() then crashed on with an opaque TypeError; raise
            # a clear error instead.
            log.error("Neither EDITOR_ nor EDITOR are set")
            raise ValueError("Neither EDITOR_ nor EDITOR are set")
        editor_cmdlist = shlex.split(editorstr)
        log.debug("editor_cmdlist: %r" % editor_cmdlist)
        return editor_cmdlist

    def preprocess_args(self, args):
        """
        If the single argument starts with '+', shlex.split it,
        e.g. '+123 README' -> ['+123', 'README']
        """
        # BUGFIX: the previous check (``args[0][0].lstrip()[0]``) raised
        # IndexError for an empty or whitespace-leading first argument.
        if len(args) == 1 and args[0].lstrip().startswith('+'):
            args = shlex.split(args[0])
        return args
class Conf(object):
    """Plain attribute container for parsed CLI settings (populated in main())."""
    pass
def main(argv=None, stdin=sys.stdin,
         stdout=sys.stdout,
         stderr=sys.stderr,
         encoding=DEFAULT_ENCODING):
    """
    Parse ``argv`` by flag membership (no optparse), read arguments from
    ``stdin``, and run the configured command (``-x <cmd>`` or the editor
    via ``-e``) with them.

    Keyword Arguments:
        argv (list[str]): flags and, after ``-x``, the command tokens
        stdin/stdout/stderr (file-like): standard streams
        encoding (str): codec used to wrap the streams on Python 2

    Returns:
        int: RET_OK (0) on success, or one of the RET_ERR_* codes
    """
    # Work on a copy so the caller's list is never mutated by .remove().
    if argv is None:
        _argv = argv = []
    else:
        _argv = argv
        argv = _argv[:]
    if sys.version_info.major < 3:
        # Python 2: wrap byte streams so the rest of main() sees text.
        stdin = codecs.getreader(encoding)(stdin)
        stdout = codecs.getwriter(encoding)(stdout)

    conf = Conf()
    conf.cmd = None
    cmdlist = None
    # -x consumes everything after it as the command to run.
    if '-x' in argv:
        xpos = argv.index('-x')
        cmdlist = argv[xpos + 1:]
        if len(cmdlist) == 1:
            # A single token may be a whole quoted command string.
            #quotechars = '"\''
            cmdstr = cmdlist[0]  # XXX: .strip(quotechars)
            try:
                cmdlist = shlex.split(cmdstr)
            except ValueError as e:
                log.debug(cmdstr)
                log.exception(e)
                raise
        conf.cmd = Cmd(cmdlist)
        argv = argv[0:xpos]

    if '-h' in argv or '--help' in argv:
        print_help(file=stdout)
        return 0
    # -v once -> INFO, twice -> DEBUG.
    if '-v' in argv:
        count = argv.count('-v')
        if count == 1:
            logging.basicConfig(
                level=logging.INFO)
            argv.remove('-v')
        if count == 2:
            logging.basicConfig(
                level=logging.DEBUG)
            argv.remove('-v')
            argv.remove('-v')
    if '-t' in argv:
        # Run the embedded unittest suite instead of a command.
        argv.remove('-t')
        return unittest.main()

    conf.open_in_editor = False
    if '-e' in argv:
        # -e overrides any -x command with the EDITOR_/EDITOR command.
        conf.open_in_editor = True
        conf.cmd = OpenEditorCmd()

    if not (cmdlist or conf.open_in_editor):
        errmsg = '''Error: Must specify '-e' or '-x <cmd>' or '-x "cmd"'''
        print(errmsg, file=stderr)
        print("", file=stderr)
        print_help(file=stderr)
        # prs.error()
        return RET_ERR_ARGS_EXPECTED

    # --each/--map: run the command once per argument instead of once
    # with all arguments.
    conf.all_at_once = True
    conf.one_at_a_time = False
    for x in ['--each', '--map']:
        if x in argv:
            conf.one_at_a_time = True
            conf.all_at_once = False
            argv.remove(x)
            break

    conf.stop_on_error = True
    if '-f' in argv or '--force' in argv:
        conf.stop_on_error = False
    if '--stop-on-error' in argv:
        conf.stop_on_error = True

    if '-0' in argv:
        # NUL-delimited input (e.g. from find -print0).
        text = stdin.read()
        lines = args = text.split('\0')  # TODO: itersplit
    else:
        # Newline-delimited input; blank lines are skipped.
        def iter_stdin(stdin):
            for line in stdin:
                log.info("stdin>>> %r" % line)
                l = line.strip()
                if l:
                    yield l
        lines = iter_stdin(iter(stdin))
    if conf.all_at_once:
        args = list(lines)
    else:
        args = iter(lines)

    log.info('cmd: %s' % conf.cmd)
    log.debug("args: %r" % args)

    retcode = RET_OK
    if conf.all_at_once:
        retcode = conf.cmd.run(args, join_args=' ')
    elif conf.one_at_a_time:
        retcode = RET_OK
        error_count = 0
        for arg in args:
            _args = [arg]
            _retcode = conf.cmd.run(_args, join_args=' ')
            log.debug("cmd.retcode: %d" % _retcode)
            if _retcode != RET_OK:
                # Remember the failure but keep going unless stop_on_error.
                retcode = RET_ERR_IN_A_SUBCOMMAND
                error_count += 1
                if conf.stop_on_error:
                    print(
                        "ERROR: Stopping early (use -f to continue on errors)",
                        file=stderr)
                    break
    else:
        log.info(conf.cmd)
        log.info(args)
    return retcode
import unittest
class TestEl(unittest.TestCase):
    """Smoke tests driving main() with in-memory stdin."""

    def test_main_help(self):
        cmd = ['-h']
        retcode = main(argv=cmd)
        self.assertEqual(retcode, 0)

    def test_main_must_specify_x_or_e(self):
        cmd = ['-v']
        retcode = main(argv=cmd)
        self.assertEqual(retcode, 7)

    def test_main_ls_l(self):
        cmd = ['-x', 'echo']
        self._test_cmd(cmd)

    def test_main_ls_l_x_echo_(self):
        cmd = ['-x', 'echo', '#', ]
        self._test_cmd(cmd)

    def test_main_ls_l_x_echo__0(self):
        cmd = ['-x', 'echo', '#', '{0}']
        self._test_cmd(cmd)

    def test_main_ls_l_echo_0(self):
        cmd = ['-x', "echo '{0}'"]
        self._test_cmd(cmd)

    def _test_cmd(self, cmd):
        """Run main() with *cmd* and three lines of stdin; assert retcode 0."""
        stdin_text = []
        for n in range(3):
            stdin_text.append(unicode(__file__) + unicode('\n'))
        stdin = StringIO("".join(stdin_text))
        # FIX: removed a dead `lines = stdin.readlines()` + `stdin.seek(0)`
        # pair; `lines` was never used, so the extra read was pure overhead.
        retcode = main(argv=cmd, stdin=stdin)
        self.assertEqual(retcode, 0)
        return retcode
# Script entry point: `--TEST` runs the unittest suite instead of main().
if __name__ == "__main__":
    if '--TEST' in sys.argv:
        sys.argv.remove('--TEST')
        sys.exit(unittest.main())
    sys.exit(main(argv=sys.argv))
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-23 00:19:06
import re
import sys
import time
import socket
import inspect
import datetime
import traceback
from flask import render_template, request, json
from flask.ext import login
from pyspider.libs import utils, sample_handler, dataurl
from pyspider.libs.response import rebuild_response
from pyspider.processor.project_module import ProjectManager, ProjectFinder, ProjectLoader
from .app import app
# Template task used by debug() when no taskid is supplied: a `data:` URL
# whose processing callback is the project's on_start.
default_task = {
    'taskid': 'data:,on_start',
    'project': '',
    'url': 'data:,on_start',
    'process': {
        'callback': 'on_start',
    },
}
# Source text of pyspider.libs.sample_handler, used as the starting script
# for projects that do not yet exist (placeholders substituted in debug()).
default_script = inspect.getsource(sample_handler)
def verify_project_name(project):
    """Return True iff *project* contains no non-word characters."""
    return re.search(r"\W", project) is None
@app.route('/debug/<project>', methods=['GET', 'POST'])
def debug(project):
    """
    Render the debugger UI for *project*.

    Loads the stored script (or instantiates the sample script for a new
    project) and the task identified by ``?taskid=`` (or the default task).
    """
    if not verify_project_name(project):
        return 'project name is not allowed!', 400
    projectdb = app.config['projectdb']
    info = projectdb.get(project)
    if info:
        script = info['script']
    else:
        # New project: fill the sample handler's placeholders.
        script = (default_script
                  .replace('__DATE__', datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                  .replace('__PROJECT_NAME__', project)
                  .replace('__START_URL__', request.values.get('start-urls') or '__START_URL__'))

    taskid = request.args.get('taskid')
    if taskid:
        taskdb = app.config['taskdb']
        task = taskdb.get_task(
            project, taskid, ['taskid', 'project', 'url', 'fetch', 'process'])
    else:
        # BUGFIX: previously this assigned the shared module-level
        # `default_task` and then mutated it in place, leaking the last
        # request's project name across requests; use a per-request copy.
        task = dict(default_task, project=project)

    return render_template("debug.html", task=task, script=script, project_name=project)
@app.before_first_request
def enable_projects_import():
    """
    Install a sys.meta_path import hook so scripts stored in projectdb can
    be imported by name like regular Python modules.
    """
    class DebuggerProjectFinder(ProjectFinder):
        # Finder: resolve a module name via the projectdb lookup.
        def get_loader(self, name):
            info = app.config['projectdb'].get(name)
            if info:
                return ProjectLoader(info)
    sys.meta_path.append(DebuggerProjectFinder())
@app.route('/debug/<project>/run', methods=['POST', ])
def run(project):
    """
    Execute one task of *project* under the debugger and return a JSON
    result: fetch result, logs, follow-up tasks, messages and timing.
    Every failure path still returns HTTP 200 with the error in 'logs'.
    """
    start_time = time.time()
    try:
        # The task JSON is posted by the debugger UI form.
        task = utils.decode_unicode_obj(json.loads(request.form['task']))
    except Exception:
        result = {
            'fetch_result': "",
            'logs': u'task json error',
            'follows': [],
            'messages': [],
            'result': None,
            'time': time.time() - start_time,
        }
        return json.dumps(utils.unicode_obj(result)), 200, {'Content-Type': 'application/json'}

    # Run the posted script as a transient DEBUG project.
    project_info = {
        'name': project,
        'status': 'DEBUG',
        'script': request.form['script'],
    }

    fetch_result = {}
    try:
        fetch_result = app.config['fetch'](task)
        response = rebuild_response(fetch_result)
        module = ProjectManager.build_module(project_info, {
            'debugger': True
        })
        ret = module['instance'].run_task(module['module'], task, response)
    except Exception:
        # NOTE(review): `type` shadows the builtin here; kept as-is.
        type, value, tb = sys.exc_info()
        tb = utils.hide_me(tb, globals())
        logs = ''.join(traceback.format_exception(type, value, tb))
        result = {
            'fetch_result': fetch_result,
            'logs': logs,
            'follows': [],
            'messages': [],
            'result': None,
            'time': time.time() - start_time,
        }
    else:
        result = {
            'fetch_result': fetch_result,
            'logs': ret.logstr(),
            'follows': ret.follows,
            'messages': ret.messages,
            'result': ret.result,
            'time': time.time() - start_time,
        }
        result['fetch_result']['content'] = response.text
        # Inline image responses as a data: URL so the UI can preview them.
        if (response.headers.get('content-type', '').startswith('image')):
            result['fetch_result']['dataurl'] = dataurl.encode(
                response.content, response.headers['content-type'])

    try:
        # binary data can't encode to JSON, encode result as unicode obj
        # before send it to frontend
        return json.dumps(utils.unicode_obj(result)), 200, {'Content-Type': 'application/json'}
    except Exception:
        # Even serialization errors are reported through the normal shape.
        type, value, tb = sys.exc_info()
        tb = utils.hide_me(tb, globals())
        logs = ''.join(traceback.format_exception(type, value, tb))
        result = {
            'fetch_result': "",
            'logs': logs,
            'follows': [],
            'messages': [],
            'result': None,
            'time': time.time() - start_time,
        }
        return json.dumps(utils.unicode_obj(result)), 200, {'Content-Type': 'application/json'}
@app.route('/debug/<project>/save', methods=['POST', ])
def save(project):
    """
    Persist the posted script for *project* (creating the project if
    needed) and notify the scheduler RPC of the update.
    """
    if not verify_project_name(project):
        return 'project name is not allowed!', 400
    projectdb = app.config['projectdb']
    script = request.form['script']
    project_info = projectdb.get(project, fields=['name', 'status', 'group'])
    # Projects in the 'lock' group may only be saved by an authenticated user.
    if project_info and 'lock' in projectdb.split_group(project_info.get('group')) \
            and not login.current_user.is_active():
        return app.login_response
    if project_info:
        info = {
            'script': script,
        }
        # A previously runnable project goes back through CHECKING.
        if project_info.get('status') in ('DEBUG', 'RUNNING', ):
            info['status'] = 'CHECKING'
        projectdb.update(project, info)
    else:
        # First save: create the project with default rate/burst limits.
        info = {
            'name': project,
            'script': script,
            'status': 'TODO',
            'rate': app.config.get('max_rate', 1),
            'burst': app.config.get('max_burst', 3),
        }
        projectdb.insert(project, info)
    rpc = app.config['scheduler_rpc']
    if rpc is not None:
        try:
            rpc.update_project()
        except socket.error as e:
            # Best-effort notification; report but still HTTP 200.
            app.logger.warning('connect to scheduler rpc error: %r', e)
            return 'rpc error', 200
    return 'ok', 200
@app.route('/helper.js')
def resizer_js():
    """Serve the iframe helper script, templated with the requesting host."""
    headers = {'Content-Type': 'application/javascript'}
    body = render_template("helper.js", host=request.headers['Host'])
    return body, 200, headers
@app.route('/helper.html')
def resizer_html():
    """Render the iframe helper page with optional height/script params."""
    params = {
        'height': request.args.get('height'),
        'script': request.args.get('script', ''),
    }
    return render_template("helper.html", **params)
| |
import logging
import os
import subprocess
from optparse import Values
from typing import Any, List, Optional
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.configuration import (
Configuration,
Kind,
get_configuration_files,
kinds,
)
from pip._internal.exceptions import PipError
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import get_prog, write_output
# Module-scoped logger, named after this module.
logger = logging.getLogger(__name__)
class ConfigurationCommand(Command):
    """
    Manage local and global configuration.

    Subcommands:

    - list: List the active configuration (or from the file specified)
    - edit: Edit the configuration file in an editor
    - get: Get the value associated with name
    - set: Set the name=value
    - unset: Unset the value associated with name
    - debug: List the configuration files and values defined under them

    If none of --user, --global and --site are passed, a virtual
    environment configuration file is used if one is active and the file
    exists. Otherwise, all modifications happen to the user file by
    default.
    """

    ignore_require_venv = True
    usage = """
        %prog [<file-option>] list
        %prog [<file-option>] [--editor <editor-path>] edit
        %prog [<file-option>] get name
        %prog [<file-option>] set name value
        %prog [<file-option>] unset name
        %prog [<file-option>] debug
    """

    def add_options(self) -> None:
        """Register --editor and the mutually exclusive file-scope options."""
        self.cmd_opts.add_option(
            "--editor",
            dest="editor",
            action="store",
            default=None,
            help=(
                "Editor to use to edit the file. Uses VISUAL or EDITOR "
                "environment variables if not provided."
            ),
        )

        self.cmd_opts.add_option(
            "--global",
            dest="global_file",
            action="store_true",
            default=False,
            help="Use the system-wide configuration file only",
        )

        self.cmd_opts.add_option(
            "--user",
            dest="user_file",
            action="store_true",
            default=False,
            help="Use the user configuration file only",
        )

        self.cmd_opts.add_option(
            "--site",
            dest="site_file",
            action="store_true",
            default=False,
            help="Use the current environment configuration file only",
        )

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options: Values, args: List[str]) -> int:
        """Dispatch args[0] to its handler; return SUCCESS or ERROR."""
        handlers = {
            "list": self.list_values,
            "edit": self.open_in_editor,
            "get": self.get_name,
            "set": self.set_name_value,
            "unset": self.unset_name,
            "debug": self.list_config_values,
        }

        # Determine action
        if not args or args[0] not in handlers:
            logger.error(
                "Need an action (%s) to perform.",
                ", ".join(sorted(handlers)),
            )
            return ERROR

        action = args[0]

        # Determine which configuration files are to be loaded
        #    Depends on whether the command is modifying.
        try:
            load_only = self._determine_file(
                options, need_value=(action in ["get", "set", "unset", "edit"])
            )
        except PipError as e:
            logger.error(e.args[0])
            return ERROR

        # Load a new configuration
        self.configuration = Configuration(
            isolated=options.isolated_mode, load_only=load_only
        )
        self.configuration.load()

        # Error handling happens here, not in the action-handlers.
        try:
            handlers[action](options, args[1:])
        except PipError as e:
            logger.error(e.args[0])
            return ERROR

        return SUCCESS

    def _determine_file(self, options: Values, need_value: bool) -> Optional[Kind]:
        """Map the --user/--global/--site flags to a single file Kind.

        Returns None when no flag is given and the action does not need a
        specific file; raises PipError when more than one flag is set.
        """
        file_options = [
            key
            for key, value in (
                (kinds.USER, options.user_file),
                (kinds.GLOBAL, options.global_file),
                (kinds.SITE, options.site_file),
            )
            if value
        ]

        if not file_options:
            if not need_value:
                return None
            # Default to user, unless there's a site file.
            elif any(
                os.path.exists(site_config_file)
                for site_config_file in get_configuration_files()[kinds.SITE]
            ):
                return kinds.SITE
            else:
                return kinds.USER
        elif len(file_options) == 1:
            return file_options[0]

        raise PipError(
            "Need exactly one file to operate upon "
            "(--user, --site, --global) to perform."
        )

    def list_values(self, options: Values, args: List[str]) -> None:
        """Print every key=value pair of the loaded configuration."""
        self._get_n_args(args, "list", n=0)

        for key, value in sorted(self.configuration.items()):
            write_output("%s=%r", key, value)

    def get_name(self, options: Values, args: List[str]) -> None:
        """Print the value stored under the given name."""
        key = self._get_n_args(args, "get [name]", n=1)
        value = self.configuration.get_value(key)

        write_output("%s", value)

    def set_name_value(self, options: Values, args: List[str]) -> None:
        """Store name=value and save the configuration file."""
        key, value = self._get_n_args(args, "set [name] [value]", n=2)
        self.configuration.set_value(key, value)

        self._save_configuration()

    def unset_name(self, options: Values, args: List[str]) -> None:
        """Remove the given name and save the configuration file."""
        key = self._get_n_args(args, "unset [name]", n=1)
        self.configuration.unset_value(key)

        self._save_configuration()

    def list_config_values(self, options: Values, args: List[str]) -> None:
        """List config key-value pairs across different config files"""
        self._get_n_args(args, "debug", n=0)

        self.print_env_var_values()
        # Iterate over config files and print if they exist, and the
        # key-value pairs present in them if they do
        for variant, files in sorted(self.configuration.iter_config_files()):
            write_output("%s:", variant)
            for fname in files:
                with indent_log():
                    file_exists = os.path.exists(fname)
                    write_output("%s, exists: %r", fname, file_exists)
                    if file_exists:
                        self.print_config_file_values(variant)

    def print_config_file_values(self, variant: Kind) -> None:
        """Get key-value pairs from the file of a variant"""
        for name, value in self.configuration.get_values_in_config(variant).items():
            with indent_log():
                write_output("%s: %s", name, value)

    def print_env_var_values(self) -> None:
        """Get key-values pairs present as environment variables"""
        write_output("%s:", "env_var")
        with indent_log():
            for key, value in sorted(self.configuration.get_environ_vars()):
                env_var = f"PIP_{key.upper()}"
                write_output("%s=%r", env_var, value)

    def open_in_editor(self, options: Values, args: List[str]) -> None:
        """Open the chosen configuration file in the resolved editor."""
        editor = self._determine_editor(options)

        fname = self.configuration.get_file_to_edit()
        if fname is None:
            raise PipError("Could not determine appropriate file.")

        try:
            subprocess.check_call([editor, fname])
        except FileNotFoundError as e:
            # Attribute the missing-file error to the editor binary.
            if not e.filename:
                e.filename = editor
            raise
        except subprocess.CalledProcessError as e:
            raise PipError(
                "Editor Subprocess exited with exit code {}".format(e.returncode)
            )

    def _get_n_args(self, args: List[str], example: str, n: int) -> Any:
        """Helper to make sure the command got the right number of arguments"""
        if len(args) != n:
            msg = (
                "Got unexpected number of arguments, expected {}. "
                '(example: "{} config {}")'
            ).format(n, get_prog(), example)
            raise PipError(msg)

        if n == 1:
            return args[0]
        else:
            return args

    def _save_configuration(self) -> None:
        # We successfully ran a modifying command. Need to save the
        # configuration.
        try:
            self.configuration.save()
        except Exception:
            logger.exception(
                "Unable to save configuration. Please report this as a bug."
            )
            raise PipError("Internal Error.")

    def _determine_editor(self, options: Values) -> str:
        """Resolve the editor: --editor flag, then $VISUAL, then $EDITOR."""
        if options.editor is not None:
            return options.editor
        elif "VISUAL" in os.environ:
            return os.environ["VISUAL"]
        elif "EDITOR" in os.environ:
            return os.environ["EDITOR"]
        else:
            raise PipError("Could not determine editor to use.")
| |
# This file is NOT licensed under the GPLv3, which is the license for the rest
# of YouCompleteMe.
#
# Here's the license text for this file:
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# For more information, please refer to <http://unlicense.org/>
import os
import subprocess
import ycm_core
# Query the installed GCC's version string (used to build include paths below).
# BUGFIX: use an argument list instead of shell=True and wait for the child
# via communicate(), so the pipe is drained/closed and no zombie is left.
try:
    _gcc_proc = subprocess.Popen(['gcc', '-dumpversion'], stdout=subprocess.PIPE)
    gcc_version = _gcc_proc.communicate()[0].decode().strip()
except OSError:
    # gcc not installed: fall back to an empty version, matching the old
    # behaviour of reading empty stdout from a failed shell command.
    gcc_version = ''
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
    '-Wall',
    '-Wextra',
    #'-Werror',
    #'-Wc++98-compat',
    '-Wno-long-long',
    '-Wno-variadic-macros',
    '-Wno-unused-parameter',
    '-fexceptions',
    '-DNDEBUG',
    # You 100% do NOT need -DUSE_CLANG_COMPLETER in your flags; only the YCM
    # source code needs it.
    '-DUSE_CLANG_COMPLETER',
    # THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
    # language to use when compiling headers. So it will guess. Badly. So C++
    # headers will be compiled as C headers. You don't want that so ALWAYS specify
    # a "-std=<something>".
    # For a C project, you would set this to something like 'c99' instead of
    # 'c++11'.
    '-std=c++14',
    # ...and the same thing goes for the magic -x option which specifies the
    # language that the files to be compiled are written in. This is mostly
    # relevant for c++ headers.
    # For a C project, you would set this to 'c' instead of 'c++'.
    '-x',
    'c++',
    # GCC system include paths, parameterized on the detected gcc version.
    '-isystem',
    '/usr/include/c++/{}'.format(gcc_version),
    '-isystem',
    '/usr/lib/gcc/x86_64-pc-linux-gnu/{}/include'.format(gcc_version),
    '-isystem',
    '/usr/lib/gcc/x86_64-pc-linux-gnu/{}/include-fixed'.format(gcc_version),
    '-isystem',
    '/usr/include',
    '-isystem',
    '/usr/local/include',
    '-isystem',
    '../BoostParts',
    '-isystem',
    # This path will only work on OS X, but extra paths that don't exist are not
    # harmful
    '/System/Library/Frameworks/Python.framework/Headers',
    '-isystem',
    '../llvm/include',
    '-isystem',
    '../llvm/tools/clang/include',
    '-I',
    '.',
    '-I',
    './ClangCompleter',
    '-isystem',
    './tests/gmock/gtest',
    '-isystem',
    './tests/gmock/gtest/include',
    '-isystem',
    './tests/gmock',
    '-isystem',
    './tests/gmock/include',
    '-isystem',
    '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/../include/c++/v1',
    '-isystem',
    '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
    '-isystem',
    '/usr/lib/llvm-3.5/include',
    '-isystem',
    '/usr/include/clang',
    '-isystem',
    '/usr/include/llvm',
    '-D__STDC_LIMIT_MACROS',
    '-D__STDC_CONSTANT_MACROS'
]

# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
#   set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''

# NOTE: os.path.exists('') is False, so with the default empty folder above no
# database is loaded and the static `flags` list is used instead.
if os.path.exists( compilation_database_folder ):
    database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
    database = None

# Extensions tried when mapping a header to a sibling source file
# (see GetCompilationInfoForFile below).
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
    """Absolute path of the directory containing this config file."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Return a copy of *flags* with relative include paths rooted at
    *working_directory*; if no directory is given, return flags unchanged."""
    if not working_directory:
        return list(flags)
    PATH_FLAGS = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute = []
    next_is_path = False
    for token in flags:
        rewritten = token
        if next_is_path:
            # This token is the argument of the previous path flag.
            next_is_path = False
            if not token.startswith('/'):
                rewritten = os.path.join(working_directory, token)
        for prefix in PATH_FLAGS:
            if token == prefix:
                # Separated form: the next token carries the path.
                next_is_path = True
                break
            if token.startswith(prefix):
                # Fused form, e.g. '-Ifoo': rewrite the embedded path.
                tail = token[len(prefix):]
                rewritten = prefix + os.path.join(working_directory, tail)
                break
        if rewritten:
            absolute.append(rewritten)
    return absolute
def IsHeaderFile(filename):
    """True if *filename* has a C/C++ header extension."""
    return os.path.splitext(filename)[1] in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile(filename):
    """Look up compilation info for *filename* in the database.

    The compile_commands.json generated by CMake has no entries for
    headers, so for a header we try each sibling source file with the
    same basename and return its info if it has flags; None otherwise.
    """
    if not IsHeaderFile(filename):
        return database.GetCompilationInfoForFile(filename)
    root = os.path.splitext(filename)[0]
    for ext in SOURCE_EXTENSIONS:
        candidate = root + ext
        if not os.path.exists(candidate):
            continue
        info = database.GetCompilationInfoForFile(candidate)
        if info.compiler_flags_:
            return info
    return None
def FlagsForFile( filename, **kwargs ):
    """
    YouCompleteMe entry point: return the clang flags for *filename*,
    either from the compilation database or from the static `flags` list.
    """
    if database:
        # Bear in mind that compilation_info.compiler_flags_ does NOT return a
        # python list, but a "list-like" StringVec object
        compilation_info = GetCompilationInfoForFile( filename )
        if not compilation_info:
            return None
        final_flags = MakeRelativePathsInFlagsAbsolute(
            compilation_info.compiler_flags_,
            compilation_info.compiler_working_dir_ )
        # NOTE: This is just for YouCompleteMe; it's highly likely that your project
        # does NOT need to remove the stdlib flag. DO NOT USE THIS IN YOUR
        # ycm_extra_conf IF YOU'RE NOT 100% SURE YOU NEED IT.
        try:
            final_flags.remove( '-stdlib=libc++' )
        except ValueError:
            pass
    else:
        # No database: anchor the static flags at this script's directory.
        relative_to = DirectoryOfThisScript()
        final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )
    return {
        'flags': final_flags,
        'do_cache': True
    }
| |
#!/usr/bin/env python
from socket import gethostbyname
from clusto.scripthelpers import init_script
from clusto.drivers import *
from diggext.drivers import *
import clusto
def get_factory(name, layout=None):
    """
    Return a RackFactory instance for the rack named *name*, or None if
    the rack's layout has no registered factory in LAYOUTS.

    Keyword Arguments:
        layout: rack layout id; looked up from the rack's 'racklayout'
            attribute when not supplied.
    """
    # BUGFIX: always look the rack up -- its datacenter parents are needed
    # below even when the caller supplies `layout`.  Previously `rack` was
    # only bound inside the `if not layout` branch, so passing a layout
    # raised NameError at the factory() call.
    rack = clusto.get_by_name(name)
    if not layout:
        layout = rack.attr_value(key='racklayout')
    factory = LAYOUTS.get(str(layout), None)
    if factory:
        factory = factory(name, rack.parents(clusto_types=['datacenter']))
    return factory
class RackFactory(object):
    """Shared helpers for the concrete per-layout rack factories."""

    def bind_dns_ip_to_osport(self, obj, osport, porttype=None, portnum=None, domain='digg.internal'):
        """Resolve obj's name under *domain* via DNS and bind the address
        to the given OS port."""
        fqdn = '%s.%s' % (obj.name, domain)
        address = gethostbyname(fqdn)
        obj.bind_ip_to_osport(address, osport, porttype=porttype, portnum=portnum)
class Digg201001RackFactory(RackFactory):
    """Factory for racks built to the '201001' layout: one Cisco 4948
    switch, one OpenGear console server, one PowerTower XM, and servers
    placed/cabled by switch port number."""

    LAYOUT_NAME = '201001'

    # Switch port -> rack unit for the server on that port.
    # Ports 24-30 map to RUs 25-31 (RU 24 is skipped).
    SWITCHPORT_TO_RU = {
        1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11,
        12: 12, 13: 13, 14: 14, 15: 15, 16: 16, 17: 17, 18: 18, 19: 19, 20: 20,
        21: 21, 22: 22, 23: 23,
        24: 25, 25: 26, 26: 27, 27: 28, 28: 29, 29: 30, 30: 31
    }

    # Switch port -> power-tower outlet feeding the server on that port.
    SWITCHPORT_TO_PWR = {
        1: 'bb1', 2: 'bb2', 3: 'bb3', 4: 'bb4', 5: 'bb5', 6: 'bb6', 7: 'bb7',
        8: 'bb8', 9: 'ba1', 10: 'ba2', 11: 'ba3', 12: 'ba4', 13: 'ba5',
        14: 'ba6', 15: 'ba7', 16: 'ba8', 17: 'ab1', 18: 'ab2', 19: 'ab3',
        20: 'ab4', 21: 'ab5', 22: 'ab6', 23: 'ab7', 24: 'aa1', 25: 'aa2',
        26: 'aa3', 27: 'aa4', 28: 'aa5', 29: 'aa6', 30: 'aa7'
    }

    def __init__(self, name, datacenter):
        # get_or_create makes construction idempotent: existing entities
        # are reused on re-runs.
        self.datacenter = datacenter
        self.rack = clusto.get_or_create(name, APCRack)
        self.switch = clusto.get_or_create(name + '-sw1', Cisco4948)
        self.console = clusto.get_or_create(name + '-ts1', OpenGearCM4148)
        self.power = clusto.get_or_create(name + '-pwr1', PowerTowerXM)

    def connect_ports(self):
        """Place and cross-connect the rack's infrastructure devices.

        Every insert/connect is guarded (membership or port_free checks)
        so this method can be re-run without duplicating connections.
        """
        self.rack.set_attr(key='racklayout', value=self.LAYOUT_NAME)
        if not self.rack in self.datacenter:
            self.datacenter.insert(self.rack)

        # Power tower at RU 41; uplink on switch port 44, console port 44.
        if not self.power in self.rack:
            self.rack.insert(self.power, 41)
        if self.power.port_free('nic-eth', 1):
            self.power.connect_ports('nic-eth', 1, self.switch, 44)
        if self.power.port_free('console-serial', 1):
            self.power.connect_ports('console-serial', 1, self.console, 44)

        # Switch at RU 36; fed from outlet 'aa8', console on port 48.
        if not self.switch in self.rack:
            self.rack.insert(self.switch, 36)
        if self.switch.port_free('pwr-nema-5', 1):
            self.switch.connect_ports('pwr-nema-5', 1, self.power, 'aa8')
        if self.switch.port_free('console-serial', 1):
            self.switch.connect_ports('console-serial', 1, self.console, 48)

        # Console server at RU 34; fed from outlet 'ab8', uplink on port 43.
        if not self.console in self.rack:
            self.rack.insert(self.console, 34)
        if self.console.port_free('pwr-nema-5', 1):
            self.console.connect_ports('pwr-nema-5', 1, self.power, 'ab8')
        if self.console.port_free('nic-eth', 1):
            self.console.connect_ports('nic-eth', 1, self.switch, 43)

        # Bind management IPs resolved from internal DNS.
        self.bind_dns_ip_to_osport(self.switch, 'Vlan442')
        self.bind_dns_ip_to_osport(self.console, 'nic0', porttype='nic-eth', portnum=1)
        self.bind_dns_ip_to_osport(self.power, 'nic0', porttype='nic-eth', portnum=1)

    def add_server(self, server, switchport):
        """Rack *server* at the RU for *switchport* and wire its network,
        power, and console connections (idempotent, like connect_ports)."""
        if not server in self.rack:
            self.rack.insert(server, self.SWITCHPORT_TO_RU[switchport])
        if server.port_free('nic-eth', 1):
            server.connect_ports('nic-eth', 1, self.switch, switchport)
        if server.port_free('pwr-nema-5', 1):
            server.connect_ports('pwr-nema-5', 1, self.power, self.SWITCHPORT_TO_PWR[switchport])
        if server.port_free('console-serial', 1):
            server.connect_ports('console-serial', 1, self.console, switchport)

    def get_driver(self, switchport):
        # Same server driver regardless of port in this layout.
        return PenguinServer
class Digg5555RackFactory(RackFactory):
    # Layout identifier stored on the rack's 'racklayout' attribute and used
    # as the key in the LAYOUTS registry.
    LAYOUT_NAME = '5555'
    # Switch port -> rack unit. Ports 21-40 repeat the RU sequence of ports
    # 1-20 because each server uses two switch ports (see add_server, which
    # folds ports > 20 onto 1-20 for the primary NIC).
    SWITCHPORT_TO_RU = {
        1:1, 2:2, 3:3, 4:4, 5:5,
        6:7, 7:8, 8:9, 9:10, 10:11,
        11:13, 12:14, 13:15, 14:16, 15:17,
        16:19, 17:20, 18:21, 19:22, 20:23,
        21:1, 22:2, 23:3, 24:4, 25:5,
        26:7, 27:8, 28:9, 29:10, 30:11,
        31:13, 32:14, 33:15, 34:16, 35:17,
        36:19, 37:20, 38:21, 39:22, 40:23,
    }
    # Switch port (1-20) -> power outlet label on the PowerTowerXM.
    SWITCHPORT_TO_PWR = {
        1: 'bb1', 2: 'bb2', 3: 'bb3', 4: 'bb4', 5: 'bb5',
        6: 'ba1', 7: 'ba2', 8: 'ba3', 9: 'ba4', 10: 'ba5',
        11: 'ab1', 12: 'ab2', 13: 'ab3', 14: 'ab4', 15: 'ab5',
        16: 'aa1', 17: 'aa2', 18: 'aa3', 19: 'aa4', 20: 'aa5',
    }
    def __init__(self, name, datacenter):
        """Fetch or create the rack and its shared gear (switch, console
        server, power strip) for the rack named *name* in *datacenter*."""
        self.datacenter = datacenter
        self.rack = clusto.get_or_create(name, APCRack)
        self.switch = clusto.get_or_create(name + '-sw1', Cisco4948)
        self.console = clusto.get_or_create(name + '-ts1', OpenGearCM4148)
        self.power = clusto.get_or_create(name + '-pwr1', PowerTowerXM)
    def connect_ports(self):
        """Idempotently place the shared gear in the rack, cross-wire its
        network/console/power ports, and bind DNS-resolved management IPs.
        Every insert/connect is guarded so re-running is safe."""
        self.rack.set_attr(key='racklayout', value=self.LAYOUT_NAME)
        if not self.rack in self.datacenter:
            self.datacenter.insert(self.rack)
        if not self.power in self.rack:
            self.rack.insert(self.power, 29)
        if self.power.port_free('nic-eth', 1):
            self.power.connect_ports('nic-eth', 1, self.switch, 44)
        if self.power.port_free('console-serial', 1):
            self.power.connect_ports('console-serial', 1, self.console, 44)
        if not self.switch in self.rack:
            self.rack.insert(self.switch, 31)
        if self.switch.port_free('pwr-nema-5', 1):
            self.switch.connect_ports('pwr-nema-5', 1, self.power, 'aa8')
        if self.switch.port_free('console-serial', 1):
            self.switch.connect_ports('console-serial', 1, self.console, 48)
        if not self.console in self.rack:
            self.rack.insert(self.console, 30)
        if self.console.port_free('pwr-nema-5', 1):
            self.console.connect_ports('pwr-nema-5', 1, self.power, 'ab8')
        if self.console.port_free('nic-eth', 1):
            self.console.connect_ports('nic-eth', 1, self.switch, 43)
        self.bind_dns_ip_to_osport(self.switch, 'Vlan442')
        self.bind_dns_ip_to_osport(self.console, 'nic0', porttype='nic-eth', portnum=1)
        self.bind_dns_ip_to_osport(self.power, 'nic0', porttype='nic-eth', portnum=1)
    def add_server(self, server, switchport):
        """Rack *server* and wire it up. Each server gets two NICs: nic 1 on
        *switchport* (folded into 1-20) and nic 2 on switchport + 20."""
        if switchport > 20:
            # Normalize to the primary port range; the secondary NIC port is
            # recomputed below as switchport + 20.
            switchport -= 20
        if not server in self.rack:
            self.rack.insert(server, self.SWITCHPORT_TO_RU[switchport])
        if server.port_free('nic-eth', 1):
            server.connect_ports('nic-eth', 1, self.switch, switchport)
        if server.port_free('nic-eth', 2):
            server.connect_ports('nic-eth', 2, self.switch, switchport + 20)
        if server.port_free('pwr-nema-5', 1):
            server.connect_ports('pwr-nema-5', 1, self.power, self.SWITCHPORT_TO_PWR[switchport])
        if server.port_free('console-serial', 1):
            server.connect_ports('console-serial', 1, self.console, switchport)
    def get_driver(self, switchport):
        # Every port in this layout holds a 1U Penguin server.
        return PenguinServer
class Digg4444RackFactory(Digg5555RackFactory):
    # Identical wiring to the 5555 layout; only the registry key differs.
    LAYOUT_NAME = '4444'
class Digg53532URackFactory(RackFactory):
    """Rack layout '53532U': switch ports 1-18 hold 1U servers, ports 19-20
    hold 2U servers that span two rack units and draw from two power
    outlets (hence the list-valued map entries)."""
    LAYOUT_NAME = '53532U'
    # Switch port -> rack unit(s); 2U servers map to a two-element list.
    SWITCHPORT_TO_RU = {
        1: 1, 2: 2, 3: 3, 4: 4, 5: 5,
        6: 7, 7: 8, 8: 9, 9: 10, 10: 11,
        11: 13, 12: 14, 13: 15, 14: 16, 15: 17,
        16: 19, 17: 20, 18: 21, 19: [35,36], 20: [33, 34]
    }
    # Switch port -> power outlet(s); 2U servers have two outlets.
    SWITCHPORT_TO_PWR = {
        1: 'bb1', 2: 'bb2', 3: 'bb3', 4: 'bb4', 5: 'bb5',
        6: 'ba1', 7: 'ba2', 8: 'ba3', 9: 'ba4', 10: 'ba5',
        11: 'ab1', 12: 'ab2', 13: 'ab3', 14: 'ab4', 15: 'ab5',
        16: 'aa1', 17: 'aa2', 18: 'aa3', 19: ['aa7', 'ba7'],
        20: ['aa6', 'ba6']
    }
    def __init__(self, name, datacenter):
        """Fetch or create the rack and its shared gear (switch, console
        server, power strip) for the rack named *name* in *datacenter*."""
        self.datacenter = datacenter
        self.rack = clusto.get_or_create(name, APCRack)
        self.switch = clusto.get_or_create(name + '-sw1', Cisco4948)
        self.console = clusto.get_or_create(name + '-ts1', OpenGearCM4148)
        self.power = clusto.get_or_create(name + '-pwr1', PowerTowerXM)
    def connect_ports(self):
        """Idempotently place the shared gear in the rack, cross-wire its
        network/console/power ports, and bind DNS-resolved management IPs."""
        self.rack.set_attr(key='racklayout', value=self.LAYOUT_NAME)
        if not self.rack in self.datacenter:
            self.datacenter.insert(self.rack)
        if not self.power in self.rack:
            self.rack.insert(self.power, 41)
        if self.power.port_free('nic-eth', 1):
            self.power.connect_ports('nic-eth', 1, self.switch, 44)
        if self.power.port_free('console-serial', 1):
            self.power.connect_ports('console-serial', 1, self.console, 44)
        if not self.switch in self.rack:
            self.rack.insert(self.switch, 36)
        if self.switch.port_free('pwr-nema-5', 1):
            self.switch.connect_ports('pwr-nema-5', 1, self.power, 'aa8')
        if self.switch.port_free('console-serial', 1):
            self.switch.connect_ports('console-serial', 1, self.console, 48)
        if not self.console in self.rack:
            self.rack.insert(self.console, 34)
        if self.console.port_free('pwr-nema-5', 1):
            self.console.connect_ports('pwr-nema-5', 1, self.power, 'ab8')
        if self.console.port_free('nic-eth', 1):
            self.console.connect_ports('nic-eth', 1, self.switch, 43)
        self.bind_dns_ip_to_osport(self.switch, 'Vlan442')
        self.bind_dns_ip_to_osport(self.console, 'nic0', porttype='nic-eth', portnum=1)
        self.bind_dns_ip_to_osport(self.power, 'nic0', porttype='nic-eth', portnum=1)
    def add_server(self, server, switchport):
        """Rack *server* at the RU(s) mapped from *switchport* and wire its
        network, power and console ports (idempotent)."""
        if not server in self.rack:
            self.rack.insert(server, self.SWITCHPORT_TO_RU[switchport])
        if server.port_free('nic-eth', 1):
            server.connect_ports('nic-eth', 1, self.switch, switchport)
        # BUG FIX: the original referenced SWITCHPORT_TO_PWR without 'self.'
        # (NameError), iterated len() of a scalar outlet string (i.e. its
        # characters) for 1U ports, used 0-based power-port numbers, and
        # passed the whole mapping value instead of one outlet. Normalize to
        # a list and wire outlet i to 1-based power port i.
        outlets = self.SWITCHPORT_TO_PWR[switchport]
        if not isinstance(outlets, list):
            outlets = [outlets]
        for portnum, outlet in enumerate(outlets, 1):
            if server.port_free('pwr-nema-5', portnum):
                server.connect_ports('pwr-nema-5', portnum, self.power, outlet)
        if server.port_free('console-serial', 1):
            server.connect_ports('console-serial', 1, self.console, switchport)
    def get_driver(self, switchport):
        """Return the driver class for the server on *switchport*:
        PenguinServer2U for ports mapped to two rack units, else
        PenguinServer."""
        # BUG FIX: the original tested the SWITCHPORT_TO_RU dict itself
        # instead of this port's entry, so it always returned PenguinServer.
        ru = self.SWITCHPORT_TO_RU[switchport]
        if isinstance(ru, list) and len(ru) == 2:
            return PenguinServer2U
        return PenguinServer
class Digg54542URackFactory(RackFactory):
    """Rack layout '54542U': switch ports 1-19 hold 1U servers, port 20
    holds a 2U server spanning two rack units and two power outlets
    (hence the list-valued map entries)."""
    LAYOUT_NAME = '54542U'
    # Switch port -> rack unit(s); the 2U server maps to a two-element list.
    SWITCHPORT_TO_RU = {
        1: 1, 2: 2, 3: 3, 4: 4, 5: 5,
        6: 7, 7: 8, 8: 9, 9: 10, 10: 11,
        11: 13, 12: 14, 13: 15, 14: 16, 15: 17,
        16: 19, 17: 20, 18: 21, 19: 22, 20: [33, 34]
    }
    # Switch port -> power outlet(s); the 2U server has two outlets.
    SWITCHPORT_TO_PWR = {
        1: 'bb1', 2: 'bb2', 3: 'bb3', 4: 'bb4', 5: 'bb5',
        6: 'ba1', 7: 'ba2', 8: 'ba3', 9: 'ba4', 10: 'ba5',
        11: 'ab1', 12: 'ab2', 13: 'ab3', 14: 'ab4', 15: 'ab5',
        16: 'aa1', 17: 'aa2', 18: 'aa3', 19: 'aa4',
        20: ['aa6', 'ba6']
    }
    def __init__(self, name, datacenter):
        """Fetch or create the rack and its shared gear (switch, console
        server, power strip) for the rack named *name* in *datacenter*."""
        self.datacenter = datacenter
        self.rack = clusto.get_or_create(name, APCRack)
        self.switch = clusto.get_or_create(name + '-sw1', Cisco4948)
        self.console = clusto.get_or_create(name + '-ts1', OpenGearCM4148)
        self.power = clusto.get_or_create(name + '-pwr1', PowerTowerXM)
    def connect_ports(self):
        """Idempotently place the shared gear in the rack, cross-wire its
        network/console/power ports, and bind DNS-resolved management IPs."""
        self.rack.set_attr(key='racklayout', value=self.LAYOUT_NAME)
        if not self.rack in self.datacenter:
            self.datacenter.insert(self.rack)
        if not self.power in self.rack:
            self.rack.insert(self.power, 41)
        if self.power.port_free('nic-eth', 1):
            self.power.connect_ports('nic-eth', 1, self.switch, 44)
        if self.power.port_free('console-serial', 1):
            self.power.connect_ports('console-serial', 1, self.console, 44)
        if not self.switch in self.rack:
            self.rack.insert(self.switch, 36)
        if self.switch.port_free('pwr-nema-5', 1):
            self.switch.connect_ports('pwr-nema-5', 1, self.power, 'aa8')
        if self.switch.port_free('console-serial', 1):
            self.switch.connect_ports('console-serial', 1, self.console, 48)
        if not self.console in self.rack:
            self.rack.insert(self.console, 34)
        if self.console.port_free('pwr-nema-5', 1):
            self.console.connect_ports('pwr-nema-5', 1, self.power, 'ab8')
        if self.console.port_free('nic-eth', 1):
            self.console.connect_ports('nic-eth', 1, self.switch, 43)
        self.bind_dns_ip_to_osport(self.switch, 'Vlan442')
        self.bind_dns_ip_to_osport(self.console, 'nic0', porttype='nic-eth', portnum=1)
        self.bind_dns_ip_to_osport(self.power, 'nic0', porttype='nic-eth', portnum=1)
    def add_server(self, server, switchport):
        """Rack *server* at the RU(s) mapped from *switchport* and wire its
        network, power and console ports (idempotent)."""
        if not server in self.rack:
            self.rack.insert(server, self.SWITCHPORT_TO_RU[switchport])
        if server.port_free('nic-eth', 1):
            server.connect_ports('nic-eth', 1, self.switch, switchport)
        # BUG FIX: the original referenced SWITCHPORT_TO_PWR without 'self.'
        # (NameError), iterated len() of a scalar outlet string (i.e. its
        # characters) for 1U ports, used 0-based power-port numbers, and
        # passed the whole mapping value instead of one outlet. Normalize to
        # a list and wire outlet i to 1-based power port i.
        outlets = self.SWITCHPORT_TO_PWR[switchport]
        if not isinstance(outlets, list):
            outlets = [outlets]
        for portnum, outlet in enumerate(outlets, 1):
            if server.port_free('pwr-nema-5', portnum):
                server.connect_ports('pwr-nema-5', portnum, self.power, outlet)
        if server.port_free('console-serial', 1):
            server.connect_ports('console-serial', 1, self.console, switchport)
    def get_driver(self, switchport):
        """Return the driver class for the server on *switchport*:
        PenguinServer2U for ports mapped to two rack units, else
        PenguinServer."""
        # BUG FIX: the original tested the SWITCHPORT_TO_RU dict itself
        # instead of this port's entry, so it always returned PenguinServer.
        ru = self.SWITCHPORT_TO_RU[switchport]
        if isinstance(ru, list) and len(ru) == 2:
            return PenguinServer2U
        return PenguinServer
# Registry mapping layout names to factory classes; get_factory() looks
# layouts up here by the rack's 'racklayout' attribute.
# NOTE(review): Digg53532URackFactory and Digg54542URackFactory are defined
# above but not registered here — confirm whether that is intentional.
LAYOUTS = {}
for factory in [Digg4444RackFactory, Digg5555RackFactory, Digg201001RackFactory]:
    LAYOUTS[factory.LAYOUT_NAME] = factory
| |
from bisect import bisect
from uhashring.ring_ketama import KetamaRing
from uhashring.ring_meta import MetaRing
class HashRing:
    """Implement a consistent hashing ring."""

    def __init__(self, nodes=None, **kwargs):
        """Create a new HashRing given the implementation.

        :param nodes: nodes used to create the continuum (see doc for format).
        :param hash_fn: use this callable function to hash keys, can be set to
            'ketama' to use the ketama compatible implementation.
        :param vnodes: default number of vnodes per node.
        :param weight_fn: use this function to calculate the node's weight.
        """
        # The mutable default (nodes=[]) was replaced with None to avoid the
        # shared-mutable-default pitfall; behavior is unchanged.
        if nodes is None:
            nodes = []
        hash_fn = kwargs.get("hash_fn", None)
        vnodes = kwargs.get("vnodes", None)
        weight_fn = kwargs.get("weight_fn", None)
        if hash_fn == "ketama":
            # Only 'replicas' is meaningful for the ketama runtime.
            ketama_args = {k: v for k, v in kwargs.items() if k in ("replicas",)}
            if vnodes is None:
                vnodes = 40
            self.runtime = KetamaRing(**ketama_args)
        else:
            if vnodes is None:
                vnodes = 160
            self.runtime = MetaRing(hash_fn)
        self._default_vnodes = vnodes
        self.hashi = self.runtime.hashi
        if weight_fn and not callable(weight_fn):
            raise TypeError("weight_fn should be a callable function")
        self._weight_fn = weight_fn
        if self._configure_nodes(nodes):
            self.runtime._create_ring(self.runtime._nodes.items())

    def _configure_nodes(self, nodes):
        """Parse and set up the given nodes.

        :param nodes: nodes used to create the continuum (see doc for format).
        :returns: True when the configuration changed and the ring must be
            (re)built.
        """
        if isinstance(nodes, str):
            nodes = [nodes]
        elif not isinstance(nodes, (dict, list)):
            raise ValueError(
                "nodes configuration should be a list or a dict,"
                " got {}".format(type(nodes))
            )
        conf_changed = False
        for node in nodes:
            conf = {
                "hostname": node,
                "instance": None,
                "nodename": node,
                "port": None,
                "vnodes": self._default_vnodes,
                "weight": 1,
            }
            current_conf = self.runtime._nodes.get(node, {})
            nodename = node
            # new node, trigger a ring update
            if not current_conf:
                conf_changed = True
            # complex config
            if isinstance(nodes, dict):
                node_conf = nodes[node]
                if isinstance(node_conf, int):
                    conf["weight"] = node_conf
                elif isinstance(node_conf, dict):
                    for k, v in node_conf.items():
                        if k in conf:
                            conf[k] = v
                            # changing those config trigger a ring update
                            if k in ["nodename", "vnodes", "weight"]:
                                if current_conf.get(k) != v:
                                    conf_changed = True
                else:
                    raise ValueError(
                        "node configuration should be a dict or an int,"
                        " got {}".format(type(node_conf))
                    )
            if self._weight_fn:
                conf["weight"] = self._weight_fn(**conf)
            # changing the weight of a node trigger a ring update
            if current_conf.get("weight") != conf["weight"]:
                conf_changed = True
            self.runtime._nodes[nodename] = conf
        return conf_changed

    def __delitem__(self, nodename):
        """Remove the given node.

        :param nodename: the node name.
        """
        self.runtime._remove_node(nodename)

    remove_node = __delitem__

    def __getitem__(self, key):
        """Returns the instance of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "instance")

    get_node_instance = __getitem__

    def __setitem__(self, nodename, conf=None):
        """Add the given node with its associated configuration.

        :param nodename: the node name.
        :param conf: the node configuration (defaults to weight 1).
        """
        # The mutable default ({"weight": 1}) was replaced with None to
        # avoid the shared-mutable-default pitfall; behavior is unchanged.
        if conf is None:
            conf = {"weight": 1}
        if self._configure_nodes({nodename: conf}):
            self.runtime._create_ring([(nodename, self._nodes[nodename])])

    add_node = __setitem__

    def _get_pos(self, key):
        """Get the index of the given key in the sorted key list.

        We return the position with the nearest hash based on
        the provided key unless we reach the end of the continuum/ring
        in which case we return the 0 (beginning) index position.

        :param key: the key to hash and look for.
        """
        p = bisect(self.runtime._keys, self.hashi(key))
        if p == len(self.runtime._keys):
            return 0
        else:
            return p

    def _get(self, key, what):
        """Generic getter magic method.

        The node with the nearest but not less hash value is returned.

        :param key: the key to look for.
        :param what: the information to look for in, allowed values:
            - instance (default): associated node instance
            - nodename: node name
            - pos: index of the given key in the ring
            - tuple: ketama compatible (pos, name) tuple
            - weight: node weight
        """
        if not self.runtime._ring:
            return None
        pos = self._get_pos(key)
        if what == "pos":
            return pos
        nodename = self.runtime._ring[self.runtime._keys[pos]]
        if what in ["hostname", "instance", "port", "weight"]:
            return self.runtime._nodes[nodename][what]
        elif what == "dict":
            return self.runtime._nodes[nodename]
        elif what == "nodename":
            return nodename
        elif what == "tuple":
            return (self.runtime._keys[pos], nodename)

    def get(self, key):
        """Returns the node object dict matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "dict")

    def get_instances(self):
        """Returns a list of the instances of all the configured nodes."""
        return [
            c.get("instance") for c in self.runtime._nodes.values() if c.get("instance")
        ]

    def get_key(self, key):
        """Alias of ketama hashi method, returns the hash of the given key.

        This method is present for hash_ring compatibility.

        :param key: the key to look for.
        """
        return self.hashi(key)

    def get_node(self, key):
        """Returns the node name of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "nodename")

    def get_node_hostname(self, key):
        """Returns the hostname of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "hostname")

    def get_node_port(self, key):
        """Returns the port of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "port")

    def get_node_pos(self, key):
        """Returns the index position of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "pos")

    def get_node_weight(self, key):
        """Returns the weight of the node matching the hashed key.

        :param key: the key to look for.
        """
        return self._get(key, "weight")

    def get_nodes(self):
        """Returns a list of the names of all the configured nodes."""
        return self.runtime._nodes.keys()

    def get_points(self):
        """Returns a ketama compatible list of (position, nodename) tuples."""
        return [(k, self.runtime._ring[k]) for k in self.runtime._keys]

    def get_server(self, key):
        """Returns a ketama compatible (position, nodename) tuple.

        :param key: the key to look for.
        """
        return self._get(key, "tuple")

    def iterate_nodes(self, key, distinct=True):
        """hash_ring compatibility implementation.

        Given a string key it returns the nodes as a generator that
        can hold the key.
        The generator iterates one time through the ring
        starting at the correct position.
        if `distinct` is set, then the nodes returned will be unique,
        i.e. no virtual copies will be returned.
        """
        if not self.runtime._ring:
            yield None
        else:
            for node in self.range(key, unique=distinct):
                yield node["nodename"]

    def print_continuum(self):
        """Prints a ketama compatible continuum report."""
        numpoints = len(self.runtime._keys)
        if numpoints:
            print(f"Numpoints in continuum: {numpoints}")
        else:
            print("Continuum empty")
        for p in self.get_points():
            point, node = p
            print(f"{node} ({point})")

    def range(self, key, size=None, unique=True):
        """Returns a generator of nodes' configuration available
        in the continuum/ring.

        :param key: the key to look for.
        :param size: limit the list to at most this number of nodes.
        :param unique: a node may only appear once in the list (default True).
        """
        all_nodes = set()
        if unique:
            size = size or len(self.runtime._nodes)
        else:
            all_nodes = []
        pos = self._get_pos(key)
        for key in self.runtime._keys[pos:]:
            nodename = self.runtime._ring[key]
            if unique:
                if nodename in all_nodes:
                    continue
                all_nodes.add(nodename)
            else:
                all_nodes.append(nodename)
            yield self.runtime._nodes[nodename]
            if len(all_nodes) == size:
                break
        else:
            # wrapped past the end of the ring: continue from the beginning
            # up to (but not including) the starting position
            for i, key in enumerate(self.runtime._keys):
                if i < pos:
                    nodename = self.runtime._ring[key]
                    if unique:
                        if nodename in all_nodes:
                            continue
                        all_nodes.add(nodename)
                    else:
                        all_nodes.append(nodename)
                    yield self.runtime._nodes[nodename]
                    if len(all_nodes) == size:
                        break

    def regenerate(self):
        # Rebuild the whole continuum from the current node configuration.
        self.runtime._create_ring(self.runtime._nodes.items())

    @property
    def conf(self):
        return self.runtime._nodes

    nodes = conf

    @property
    def distribution(self):
        return self.runtime._distribution

    @property
    def ring(self):
        return self.runtime._ring

    continuum = ring

    @property
    def size(self):
        return len(self.runtime._ring)

    @property
    def _ring(self):
        return self.runtime._ring

    @property
    def _nodes(self):
        return self.runtime._nodes

    @property
    def _keys(self):
        return self.runtime._keys
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Enomaly ECP driver
"""
from libcloud.interface import INodeDriver
from libcloud.base import NodeDriver, NodeSize, NodeLocation
from libcloud.base import NodeImage, Node
from libcloud.base import Response, ConnectionUserAndKey
from libcloud.types import Provider, NodeState, InvalidCredsException
from libcloud.base import is_private_subnet
from zope.interface import implements
import time
import base64
import httplib
import socket
import os
# JSON is included in the standard library starting with Python 2.6. For 2.5
# and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson
# BUG FIX: catch only ImportError; the original bare except hid unrelated
# failures (e.g. KeyboardInterrupt, SyntaxError in a broken install).
try:
    import json
except ImportError:
    import simplejson as json

# Defaults
API_HOST = ''
API_PORT = (80,443)
class ECPResponse(Response):
    # self.error holds the last failure description set by success() and is
    # surfaced to callers via parse_error().
    def success(self):
        """Return True when both the HTTP status and the ECP 'errno' field in
        the JSON body indicate success; otherwise store a description in
        self.error and return False.

        Raises InvalidCredsException on HTTP 401."""
        if self.status == httplib.OK or self.status == httplib.CREATED:
            try:
                j_body = json.loads(self.body)
            except ValueError:
                self.error = "JSON response cannot be decoded."
                return False
            if j_body['errno'] == 0:
                return True
            else:
                self.error = "ECP error: %s" % j_body['message']
                return False
        elif self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsException()
        else:
            self.error = "HTTP Error Code: %s" % self.status
            return False
    def parse_error(self):
        # success() has already recorded the human-readable error text.
        return self.error
    #Interpret the json responses - no error checking required
    def parse_body(self):
        return json.loads(self.body)
    def getheaders(self):
        return self.headers
class ECPConnection(ConnectionUserAndKey):
    """
    Connection class for the Enomaly ECP driver
    """
    responseCls = ECPResponse
    host = API_HOST
    port = API_PORT
    def add_default_headers(self, headers):
        #Authentication
        # HTTP Basic auth: base64 of "user:password" with the trailing
        # newline that encodestring appends stripped off ([:-1]).
        username = self.user_id
        password = self.key
        base64string =  base64.encodestring(
                '%s:%s' % (username, password))[:-1]
        authheader =  "Basic %s" % base64string
        headers['Authorization']= authheader
        return headers
    def _encode_multipart_formdata(self, fields):
        """
        Based on Wade Leftwich's function:
        http://code.activestate.com/recipes/146306/

        Encode *fields* (a dict of form field name -> value) as a
        multipart/form-data body; returns (headers_dict, body_string).
        """
        #use a random boundary that does not appear in the fields
        # The empty initial boundary always matches ('' is a substring of
        # anything), so at least one random boundary is generated.
        boundary = ''
        while boundary in ''.join(fields):
            boundary = os.urandom(16).encode('hex')
        L = []
        for i in fields:
            L.append('--' + boundary)
            L.append('Content-Disposition: form-data; name="%s"' % i)
            L.append('')
            L.append(fields[i])
        L.append('--' + boundary + '--')
        L.append('')
        body = '\r\n'.join(L)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        header = {'Content-Type':content_type}
        return header, body
class ECPNodeDriver(NodeDriver):
    """
    Enomaly ECP node driver
    """
    name = "Enomaly Elastic Computing Platform"
    type = Provider.ECP
    connectionCls = ECPConnection
    implements(INodeDriver)
    def list_nodes(self):
        """
        Returns a list of all running Nodes
        """
        #Make the call
        res = self.connection.request('/rest/hosting/vm/list').parse_body()
        #Put together a list of node objects
        # _to_node returns None for VMs that are not running; skip those.
        nodes=[]
        for vm in res['vms']:
            node = self._to_node(vm)
            if not node == None:
                nodes.append(node)
        #And return it
        return nodes
    def _to_node(self, vm):
        """
        Turns a (json) dictionary into a Node object.
        This returns only running VMs.
        """
        #Check state
        if not vm['state'] == "running":
            return None
        #IPs
        # Collect all interface IPs except loopback, then split them into
        # public/private lists; invalid addresses are silently skipped.
        iplist = [interface['ip'] for interface in vm['interfaces'] if interface['ip'] != '127.0.0.1']
        public_ips = []
        private_ips = []
        for ip in iplist:
            try:
                socket.inet_aton(ip)
            except socket.error:
                # not a valid ip
                continue
            if is_private_subnet(ip):
                private_ips.append(ip)
            else:
                public_ips.append(ip)
        #Create the node object
        n = Node(
          id=vm['uuid'],
          name=vm['name'],
          state=NodeState.RUNNING,
          public_ip=public_ips,
          private_ip=private_ips,
          driver=self,
        )
        return n
    def reboot_node(self, node):
        """
        Shuts down a VM and then starts it again.

        Blocks, polling every 5 seconds, until the VM reports 'off'
        before starting it back up. Always returns True.
        """
        #Turn the VM off
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'stop'})
        self.connection.request(
                   '/rest/hosting/vm/%s' % node.id,
                   method='POST',
                   headers=d[0],
                   data=d[1]
        ).parse_body()
        node.state = NodeState.REBOOTING
        #Wait for it to turn off and then continue (to turn it on again)
        while node.state == NodeState.REBOOTING:
            #Check if it's off.
            response = self.connection.request(
                     '/rest/hosting/vm/%s' % node.id
                     ).parse_body()
            if response['vm']['state'] == 'off':
                node.state = NodeState.TERMINATED
            else:
                time.sleep(5)
        #Turn the VM back on.
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'start'})
        self.connection.request(
            '/rest/hosting/vm/%s' % node.id,
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()
        node.state = NodeState.RUNNING
        return True
    def destroy_node(self, node):
        """
        Shuts down and deletes a VM.

        Blocks, polling every 5 seconds, until the VM reports 'off'
        before issuing the delete. Always returns True.
        """
        #Shut down first
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'stop'})
        self.connection.request(
            '/rest/hosting/vm/%s' % node.id,
            method = 'POST',
            headers=d[0],
            data=d[1]
        ).parse_body()
        #Ensure there was no application level error
        node.state = NodeState.PENDING
        #Wait for the VM to turn off before continuing
        while node.state == NodeState.PENDING:
            #Check if it's off.
            response = self.connection.request(
                     '/rest/hosting/vm/%s' % node.id
                     ).parse_body()
            if response['vm']['state'] == 'off':
                node.state = NodeState.TERMINATED
            else:
                time.sleep(5)
        #Delete the VM
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata({'action':'delete'})
        self.connection.request(
            '/rest/hosting/vm/%s' % (node.id),
            method='POST',
            headers=d[0],
            data=d[1]
        ).parse_body()
        return True
    def list_images(self, location=None):
        """
        Returns a list of all package templates aka appliances aka images
        """
        #Make the call
        response = self.connection.request(
            '/rest/hosting/ptemplate/list').parse_body()
        #Turn the response into an array of NodeImage objects
        images = []
        for ptemplate in response['packages']:
            images.append(NodeImage(
                id = ptemplate['uuid'],
                name= '%s: %s' % (ptemplate['name'], ptemplate['description']),
                driver = self,
                ))
        return images
    def list_sizes(self, location=None):
        """
        Returns a list of all hardware templates
        """
        #Make the call
        response = self.connection.request(
            '/rest/hosting/htemplate/list').parse_body()
        #Turn the response into an array of NodeSize objects
        sizes = []
        for htemplate in response['templates']:
            sizes.append(NodeSize(
                id = htemplate['uuid'],
                name = htemplate['name'],
                ram = htemplate['memory'],
                disk = 0, #Disk is independent of hardware template
                bandwidth = 0, #There is no way to keep track of bandwidth
                price = 0, #The billing system is external
                driver = self,
                ))
        return sizes
    def list_locations(self):
        """
        This feature does not exist in ECP. Returns hard coded dummy location.
        """
        return [
          NodeLocation(id=1,
                       name="Cloud",
                       country='',
                       driver=self),
        ]
    def create_node(self, **kwargs):
        """
        Creates a virtual machine.

        Parameters: name (string), image (NodeImage), size (NodeSize)
        Returns a Node in the PENDING state.
        """
        #Find out what network to put the VM on.
        res = self.connection.request('/rest/hosting/network/list').parse_body()
        #Use the first / default network because there is no way to specific
        #which one
        network = res['networks'][0]['uuid']
        #Prepare to make the VM
        data = {
            'name' : str(kwargs['name']),
            'package' : str(kwargs['image'].id),
            'hardware' : str(kwargs['size'].id),
            'network_uuid' : str(network),
            'disk' : ''
        }
        #Black magic to make the POST requests work
        d = self.connection._encode_multipart_formdata(data)
        response = self.connection.request(
            '/rest/hosting/vm/',
            method='PUT',
            headers = d[0],
            data=d[1]
        ).parse_body()
        #Create a node object and return it.
        n = Node(
            id=response['machine_id'],
            name=data['name'],
            state=NodeState.PENDING,
            public_ip=[],
            private_ip=[],
            driver=self,
        )
        return n
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
__all__ = [
'GandiDNSDriver'
]
from libcloud.common.gandi import BaseGandiDriver, GandiConnection
from libcloud.common.gandi import GandiResponse
from libcloud.dns.types import Provider, RecordType
from libcloud.dns.types import RecordError
from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError
from libcloud.dns.base import DNSDriver, Zone, Record
TTL_MIN = 30
TTL_MAX = 2592000 # 30 days
class NewZoneVersion(object):
    """
    Context manager wrapping Gandi zone edits in a new zone version.

    Gandi requires changes to go into a fresh version of the zone, which
    is then activated — effectively a transaction. On entry a new version
    id is created and returned; all calls made inside the ``with`` block
    should target that version. On a clean exit (no exception) the new
    version is activated; if the block raised, it is never made active.
    """

    def __init__(self, driver, zone):
        self.driver = driver
        self.connection = driver.connection
        self.zone = zone

    def __enter__(self):
        con = self.connection
        con.set_context({'zone_id': self.zone.id})
        self.vid = con.request('domain.zone.version.new',
                               int(self.zone.id)).object
        return self.vid

    def __exit__(self, type, value, traceback):
        # Only activate the new version when the block completed cleanly.
        if traceback:
            return
        con = self.connection
        con.set_context({'zone_id': self.zone.id})
        con.request('domain.zone.version.set', int(self.zone.id),
                    self.vid).object
class GandiDNSResponse(GandiResponse):
    # Map Gandi API fault codes to libcloud DNS exception classes.
    exceptions = {
        581042: ZoneDoesNotExistError,
    }
class GandiDNSConnection(GandiConnection):
    # Use the DNS-specific response class so Gandi fault codes are mapped
    # to libcloud DNS exceptions.
    responseCls = GandiDNSResponse
class GandiDNSDriver(BaseGandiDriver, DNSDriver):
"""
API reference can be found at:
http://doc.rpc.gandi.net/domain/reference.html
"""
type = Provider.GANDI
name = 'Gandi DNS'
website = 'http://www.gandi.net/domain'
connectionCls = GandiDNSConnection
RECORD_TYPE_MAP = {
RecordType.A: 'A',
RecordType.AAAA: 'AAAA',
RecordType.CNAME: 'CNAME',
RecordType.LOC: 'LOC',
RecordType.MX: 'MX',
RecordType.NS: 'NS',
RecordType.SPF: 'SPF',
RecordType.SRV: 'SRV',
RecordType.TXT: 'TXT',
RecordType.WKS: 'WKS',
}
def _to_zone(self, zone):
return Zone(
id=str(zone['id']),
domain=zone['name'],
type='master',
ttl=0,
driver=self,
extra={}
)
def _to_zones(self, zones):
ret = []
for z in zones:
ret.append(self._to_zone(z))
return ret
def list_zones(self):
zones = self.connection.request('domain.zone.list')
return self._to_zones(zones.object)
def get_zone(self, zone_id):
zid = int(zone_id)
self.connection.set_context({'zone_id': zone_id})
zone = self.connection.request('domain.zone.info', zid)
return self._to_zone(zone.object)
def create_zone(self, domain, type='master', ttl=None, extra=None):
params = {
'name': domain,
}
info = self.connection.request('domain.zone.create', params)
return self._to_zone(info.object)
def update_zone(self, zone, domain=None, type=None, ttl=None, extra=None):
zid = int(zone.id)
params = {'name': domain}
self.connection.set_context({'zone_id': zone.id})
zone = self.connection.request('domain.zone.update', zid, params)
return self._to_zone(zone.object)
def delete_zone(self, zone):
zid = int(zone.id)
self.connection.set_context({'zone_id': zone.id})
res = self.connection.request('domain.zone.delete', zid)
return res.object
def _to_record(self, record, zone):
return Record(
id='%s:%s' % (record['type'], record['name']),
name=record['name'],
type=self._string_to_record_type(record['type']),
data=record['value'],
zone=zone,
driver=self,
extra={'ttl': record['ttl']}
)
def _to_records(self, records, zone):
retval = []
for r in records:
retval.append(self._to_record(r, zone))
return retval
def list_records(self, zone):
zid = int(zone.id)
self.connection.set_context({'zone_id': zone.id})
records = self.connection.request('domain.zone.record.list', zid, 0)
return self._to_records(records.object, zone)
def get_record(self, zone_id, record_id):
zid = int(zone_id)
record_type, name = record_id.split(':', 1)
filter_opts = {
'name': name,
'type': record_type
}
self.connection.set_context({'zone_id': zone_id})
records = self.connection.request('domain.zone.record.list',
zid, 0, filter_opts).object
if len(records) == 0:
raise RecordDoesNotExistError(value='', driver=self,
record_id=record_id)
return self._to_record(records[0], self.get_zone(zone_id))
def _validate_record(self, record_id, name, record_type, data, extra):
if len(data) > 1024:
raise RecordError('Record data must be <= 1024 characters',
driver=self, record_id=record_id)
if extra and 'ttl' in extra:
if extra['ttl'] < TTL_MIN:
raise RecordError('TTL must be at least 30 seconds',
driver=self, record_id=record_id)
if extra['ttl'] > TTL_MAX:
raise RecordError('TTL must not excdeed 30 days',
driver=self, record_id=record_id)
def create_record(self, name, zone, type, data, extra=None):
    """
    Create a new record in ``zone``.

    :param name: record name (hostname part).
    :param zone: Zone the record belongs to.
    :param type: RecordType of the new record.
    :param data: record value.
    :param extra: optional dict; 'ttl' is forwarded to the API if present.
    :return: the created Record.
    """
    self._validate_record(None, name, type, data, extra)

    zid = int(zone.id)
    create = {
        'name': name,
        'type': self.RECORD_TYPE_MAP[type],
        'value': data
    }
    # Bug fix: guard against the default extra=None — the original did
    # `'ttl' in extra`, which raises TypeError when extra is omitted.
    if extra and 'ttl' in extra:
        create['ttl'] = extra['ttl']

    # Gandi requires edits to happen on a fresh zone version, which is
    # activated when the context manager exits cleanly.
    with NewZoneVersion(self, zone) as vid:
        con = self.connection
        con.set_context({'zone_id': zone.id})
        rec = con.request('domain.zone.record.add',
                          zid, vid, create).object

    return self._to_record(rec, zone)
def update_record(self, record, name, type, data, extra):
    """
    Update ``record`` by deleting the old entry and adding the new one
    inside a single new zone version.

    :param record: existing Record to replace.
    :param name: new record name.
    :param type: new RecordType.
    :param data: new record value.
    :param extra: optional dict; 'ttl' is forwarded to the API if present.
    :return: the updated Record.
    """
    self._validate_record(record.id, name, type, data, extra)

    # Filter matching the *old* record, used for the delete step.
    filter_opts = {
        'name': record.name,
        'type': self.RECORD_TYPE_MAP[record.type]
    }

    update = {
        'name': name,
        'type': self.RECORD_TYPE_MAP[type],
        'value': data
    }
    # Robustness fix: tolerate extra=None (callers of the base driver API
    # may pass None); the original raised TypeError on `'ttl' in None`.
    if extra and 'ttl' in extra:
        update['ttl'] = extra['ttl']

    zid = int(record.zone.id)
    with NewZoneVersion(self, record.zone) as vid:
        con = self.connection
        con.set_context({'zone_id': record.zone.id})
        # Delete-then-add is the Gandi idiom for an in-place update.
        con.request('domain.zone.record.delete',
                    zid, vid, filter_opts)
        res = con.request('domain.zone.record.add',
                          zid, vid, update).object

    return self._to_record(res, record.zone)
def delete_record(self, record):
    """
    Delete ``record`` inside a new zone version.

    :return: True when exactly one record was deleted.
    :raises RecordDoesNotExistError: when the API reports no deletion;
        note the raise happens outside the NewZoneVersion context so the
        new version is not committed in that case.
    """
    zone_id = int(record.zone.id)
    match = {
        'name': record.name,
        'type': self.RECORD_TYPE_MAP[record.type]
    }
    with NewZoneVersion(self, record.zone) as version:
        connection = self.connection
        connection.set_context({'zone_id': record.zone.id})
        deleted = connection.request('domain.zone.record.delete',
                                     zone_id, version, match).object
        if deleted == 1:
            return True

    raise RecordDoesNotExistError(value='No such record', driver=self,
                                  record_id=record.id)
| |
# pacman.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Pacman.py holds the logic for the classic pacman game along with the main
code to run a game. This file is divided into three sections:
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
read through all of the code we wrote to make the game runs
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python pacman.py' from the command line.
The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util, layout
import sys, types, time, random, os
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
    """
    A GameState specifies the full game state, including the food, capsules,
    agent configurations and score changes.

    GameStates are used by the Game object to capture the actual state of the game and
    can be used by agents to reason about the game.

    Much of the information in a GameState is stored in a GameStateData object. We
    strongly suggest that you access that data via the accessor methods below rather
    than referring to the GameStateData object directly.

    Note that in classic Pacman, Pacman is always agent 0.
    """

    ####################################################
    # Accessor methods: use these to access state data #
    ####################################################

    # static variable keeps track of which states have had getLegalActions called
    explored = set()

    def getAndResetExplored():
        # Snapshot the class-level explored set and clear it for the next
        # run; used by autograders to count unique state expansions.
        tmp = GameState.explored.copy()
        GameState.explored = set()
        return tmp
    getAndResetExplored = staticmethod(getAndResetExplored)

    def getLegalActions( self, agentIndex=0 ):
        """
        Returns the legal actions for the agent specified.
        """
#        GameState.explored.add(self)
        # Terminal states have no legal moves.
        if self.isWin() or self.isLose(): return []

        if agentIndex == 0:  # Pacman is moving
            return PacmanRules.getLegalActions( self )
        else:
            return GhostRules.getLegalActions( self, agentIndex )

    def generateSuccessor( self, agentIndex, action):
        """
        Returns the successor state after the specified agent takes the action.
        """
        # Check that successors exist
        if self.isWin() or self.isLose(): raise Exception('Can\'t generate a successor of a terminal state.')

        # Copy current state
        state = GameState(self)

        # Let agent's logic deal with its action's effects on the board
        if agentIndex == 0:  # Pacman is moving
            state.data._eaten = [False for i in range(state.getNumAgents())]
            PacmanRules.applyAction( state, action )
        else:                # A ghost is moving
            GhostRules.applyAction( state, action, agentIndex )

        # Time passes
        if agentIndex == 0:
            state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
        else:
            GhostRules.decrementTimer( state.data.agentStates[agentIndex] )

        # Resolve multi-agent effects
        GhostRules.checkDeath( state, agentIndex )

        # Book keeping
        state.data._agentMoved = agentIndex
        state.data.score += state.data.scoreChange
        GameState.explored.add(self)
        GameState.explored.add(state)
        return state

    def getLegalPacmanActions( self ):
        # Convenience wrapper: Pacman is always agent 0.
        return self.getLegalActions( 0 )

    def generatePacmanSuccessor( self, action ):
        """
        Generates the successor state after the specified pacman move
        """
        return self.generateSuccessor( 0, action )

    def getPacmanState( self ):
        """
        Returns an AgentState object for pacman (in game.py)

        state.pos gives the current position
        state.direction gives the travel vector
        """
        # Copy so callers cannot mutate the canonical agent state.
        return self.data.agentStates[0].copy()

    def getPacmanPosition( self ):
        # (x, y) position of Pacman (agent 0).
        return self.data.agentStates[0].getPosition()

    def getGhostStates( self ):
        # All agent states except Pacman's.
        return self.data.agentStates[1:]

    def getGhostState( self, agentIndex ):
        # Valid ghost indices are 1 .. numAgents-1.
        if agentIndex == 0 or agentIndex >= self.getNumAgents():
            raise Exception("Invalid index passed to getGhostState")
        return self.data.agentStates[agentIndex]

    def getGhostPosition( self, agentIndex ):
        if agentIndex == 0:
            raise Exception("Pacman's index passed to getGhostPosition")
        return self.data.agentStates[agentIndex].getPosition()

    def getGhostPositions(self):
        # Positions of every ghost, in agent-index order.
        return [s.getPosition() for s in self.getGhostStates()]

    def getNumAgents( self ):
        return len( self.data.agentStates )

    def getScore( self ):
        return float(self.data.score)

    def getCapsules(self):
        """
        Returns a list of positions (x,y) of the remaining capsules.
        """
        return self.data.capsules

    def getNumFood( self ):
        # Count of remaining food pellets on the grid.
        return self.data.food.count()

    def getFood(self):
        """
        Returns a Grid of boolean food indicator variables.

        Grids can be accessed via list notation, so to check
        if there is food at (x,y), just call

        currentFood = state.getFood()
        if currentFood[x][y] == True: ...
        """
        return self.data.food

    def getWalls(self):
        """
        Returns a Grid of boolean wall indicator variables.

        Grids can be accessed via list notation, so to check
        if there is a wall at (x,y), just call

        walls = state.getWalls()
        if walls[x][y] == True: ...
        """
        return self.data.layout.walls

    def hasFood(self, x, y):
        return self.data.food[x][y]

    def hasWall(self, x, y):
        return self.data.layout.walls[x][y]

    def isLose( self ):
        return self.data._lose

    def isWin( self ):
        return self.data._win

    #############################################
    #             Helper methods:               #
    # You shouldn't need to call these directly #
    #############################################

    def __init__( self, prevState = None ):
        """
        Generates a new state by copying information from its predecessor.
        """
        if prevState != None: # Successor state: shallow-derive from predecessor
            self.data = GameStateData(prevState.data)
        else:                 # Initial state: start from empty data
            self.data = GameStateData()

    def deepCopy( self ):
        # Fully independent copy (GameStateData.deepCopy duplicates grids).
        state = GameState( self )
        state.data = self.data.deepCopy()
        return state

    def __eq__( self, other ):
        """
        Allows two states to be compared.
        """
        return hasattr(other, 'data') and self.data == other.data

    def __hash__( self ):
        """
        Allows states to be keys of dictionaries.
        """
        return hash( self.data )

    def __str__( self ):

        return str(self.data)

    def initialize( self, layout, numGhostAgents=1000 ):
        """
        Creates an initial game state from a layout array (see layout.py).
        """
        self.data.initialize(layout, numGhostAgents)
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
SCARED_TIME = 40    # Moves ghosts are scared after Pacman eats a capsule
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill (manhattan distance)
TIME_PENALTY = 1 # Number of points lost each round Pacman moves
class ClassicGameRules:
    """
    These game rules manage the control flow of a game, deciding when
    and how the game starts and ends.
    """
    def __init__(self, timeout=30):
        # timeout: the single per-agent time budget (seconds) returned by
        # all of the getMax*/getMove* accessors below.
        self.timeout = timeout

    def newGame( self, layout, pacmanAgent, ghostAgents, display, quiet = False, catchExceptions=False):
        """Build and return a new Game for the given layout and agents."""
        # Cap the ghost list at the number of ghost slots in the layout.
        agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
        initState = GameState()
        initState.initialize( layout, len(ghostAgents) )
        game = Game(agents, display, self, catchExceptions=catchExceptions)
        game.state = initState
        # Kept for progress reporting in getProgress().
        self.initialState = initState.deepCopy()

        self.quiet = quiet
        return game

    def process(self, state, game):
        """
        Checks to see whether it is time to end the game.
        """
        if state.isWin(): self.win(state, game)
        if state.isLose(): self.lose(state, game)

    def win( self, state, game ):
        # Announce victory (unless quiet) and stop the game loop.
        if not self.quiet: print "Pacman emerges victorious! Score: %d" % state.data.score
        game.gameOver = True

    def lose( self, state, game ):
        # Announce defeat (unless quiet) and stop the game loop.
        if not self.quiet: print "Pacman died! Score: %d" % state.data.score
        game.gameOver = True

    def getProgress(self, game):
        # Fraction of the initial food still remaining.
        return float(game.state.getNumFood()) / self.initialState.getNumFood()

    def agentCrash(self, game, agentIndex):
        # Called by the Game framework when an agent raises; agent 0 is Pacman.
        if agentIndex == 0:
            print "Pacman crashed"
        else:
            print "A ghost crashed"

    # All timing limits share the single timeout value set in __init__.
    def getMaxTotalTime(self, agentIndex):
        return self.timeout

    def getMaxStartupTime(self, agentIndex):
        return self.timeout

    def getMoveWarningTime(self, agentIndex):
        return self.timeout

    def getMoveTimeout(self, agentIndex):
        return self.timeout

    def getMaxTimeWarnings(self, agentIndex):
        return 0
class PacmanRules:
    """
    These functions govern how pacman interacts with his environment under
    the classic game rules.
    """
    PACMAN_SPEED = 1

    @staticmethod
    def getLegalActions( state ):
        """
        Returns a list of possible actions.
        """
        config = state.getPacmanState().configuration
        return Actions.getPossibleActions( config, state.data.layout.walls )

    @staticmethod
    def applyAction( state, action ):
        """
        Edits the state to reflect the results of the action.
        """
        if action not in PacmanRules.getLegalActions( state ):
            raise Exception("Illegal action " + str(action))

        pacman = state.data.agentStates[0]

        # Update Configuration
        move = Actions.directionToVector( action, PacmanRules.PACMAN_SPEED )
        pacman.configuration = pacman.configuration.generateSuccessor( move )

        # Eat anything within half a cell of the landing point.
        landing = pacman.configuration.getPosition()
        cell = nearestPoint( landing )
        if manhattanDistance( cell, landing ) <= 0.5:
            PacmanRules.consume( cell, state )

    @staticmethod
    def consume( position, state ):
        """Handle Pacman eating whatever sits at ``position``."""
        x, y = position
        # Eat food
        if state.data.food[x][y]:
            state.data.scoreChange += 10
            # Copy-on-write so other states sharing this grid stay intact.
            state.data.food = state.data.food.copy()
            state.data.food[x][y] = False
            state.data._foodEaten = position
            if state.getNumFood() == 0 and not state.data._lose:
                state.data.scoreChange += 500
                state.data._win = True
        # Eat capsule
        if position in state.getCapsules():
            state.data.capsules.remove( position )
            state.data._capsuleEaten = position
            # Reset all ghosts' scared timers
            for ghost in state.data.agentStates[1:]:
                ghost.scaredTimer = SCARED_TIME
class GhostRules:
    """
    These functions dictate how ghosts interact with their environment.
    """
    GHOST_SPEED = 1.0

    @staticmethod
    def getLegalActions( state, ghostIndex ):
        """
        Ghosts cannot stop, and cannot turn around unless they
        reach a dead end, but can turn 90 degrees at intersections.
        """
        conf = state.getGhostState( ghostIndex ).configuration
        actions = Actions.getPossibleActions( conf, state.data.layout.walls )
        backwards = Actions.reverseDirection( conf.direction )
        if Directions.STOP in actions:
            actions.remove( Directions.STOP )
        if backwards in actions and len( actions ) > 1:
            actions.remove( backwards )
        return actions

    @staticmethod
    def applyAction( state, action, ghostIndex ):
        """Edit the state to reflect the ghost taking ``action``."""
        if action not in GhostRules.getLegalActions( state, ghostIndex ):
            raise Exception("Illegal ghost action " + str(action))

        ghost = state.data.agentStates[ghostIndex]
        speed = GhostRules.GHOST_SPEED
        # Scared ghosts move at half speed.
        if ghost.scaredTimer > 0:
            speed /= 2.0
        step = Actions.directionToVector( action, speed )
        ghost.configuration = ghost.configuration.generateSuccessor( step )

    @staticmethod
    def decrementTimer( ghostState ):
        """Tick down the scared timer, snapping to the grid as it expires."""
        remaining = ghostState.scaredTimer
        if remaining == 1:
            ghostState.configuration.pos = nearestPoint( ghostState.configuration.pos )
        ghostState.scaredTimer = max( 0, remaining - 1 )

    @staticmethod
    def checkDeath( state, agentIndex ):
        """Resolve any Pacman/ghost collision after agent ``agentIndex`` moved."""
        pacmanPosition = state.getPacmanPosition()
        if agentIndex == 0:
            # Pacman just moved; any ghost can kill him.
            for index in range( 1, len( state.data.agentStates ) ):
                ghost = state.data.agentStates[index]
                if GhostRules.canKill( pacmanPosition, ghost.configuration.getPosition() ):
                    GhostRules.collide( state, ghost, index )
        else:
            # Only the ghost that moved can be involved in a collision.
            ghost = state.data.agentStates[agentIndex]
            if GhostRules.canKill( pacmanPosition, ghost.configuration.getPosition() ):
                GhostRules.collide( state, ghost, agentIndex )

    @staticmethod
    def collide( state, ghostState, agentIndex ):
        """Score and resolve a collision between Pacman and one ghost."""
        if ghostState.scaredTimer > 0:
            # Pacman eats the scared ghost.
            state.data.scoreChange += 200
            GhostRules.placeGhost( state, ghostState )
            ghostState.scaredTimer = 0
            # Added for first-person
            state.data._eaten[agentIndex] = True
        elif not state.data._win:
            # The ghost kills Pacman (unless the game is already won).
            state.data.scoreChange -= 500
            state.data._lose = True

    @staticmethod
    def canKill( pacmanPosition, ghostPosition ):
        """True when the two agents are within collision tolerance."""
        return manhattanDistance( ghostPosition, pacmanPosition ) <= COLLISION_TOLERANCE

    @staticmethod
    def placeGhost( state, ghostState ):
        """Send a ghost back to its starting configuration."""
        ghostState.configuration = ghostState.start
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
    """Append the optparse default-value hint to a help string."""
    return '%s [Default: %%default]' % str
def parseAgentArgs(str):
    """
    Parse a comma-separated option string like "opt1=val1,opt2,opt3=val3"
    into a dict. Options given without '=' get the integer value 1.

    :param str: raw option string from the -a command-line flag, or None.
    :return: dict mapping option names to string values (or 1).
    """
    if str is None: return {}
    pieces = str.split(',')
    opts = {}
    for p in pieces:
        if '=' in p:
            # Split only on the first '=' so values may themselves contain
            # '=' (the original raised ValueError on e.g. "fn=a=b").
            key, val = p.split('=', 1)
        else:
            key, val = p, 1
        opts[key] = val
    return opts
def readCommand( argv ):
    """
    Processes the command used to run pacman from the command line.

    Returns a dict of keyword arguments suitable for runGames(**args).
    When --replay is given, plays back the recorded game and exits instead.
    """
    from optparse import OptionParser
    usageStr = """
    USAGE:      python pacman.py <options>
    EXAMPLES:   (1) python pacman.py
                    - starts an interactive game
                (2) python pacman.py --layout smallClassic --zoom 2
                OR  python pacman.py -l smallClassic -z 2
                    - starts an interactive game on a smaller board, zoomed in
    """
    parser = OptionParser(usageStr)

    parser.add_option('-n', '--numGames', dest='numGames', type='int',
                      help=default('the number of GAMES to play'), metavar='GAMES', default=1)
    parser.add_option('-l', '--layout', dest='layout',
                      help=default('the LAYOUT_FILE from which to load the map layout'),
                      metavar='LAYOUT_FILE', default='mediumClassic')
    parser.add_option('-p', '--pacman', dest='pacman',
                      help=default('the agent TYPE in the pacmanAgents module to use'),
                      metavar='TYPE', default='KeyboardAgent')
    parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
                      help='Display output as text only', default=False)
    parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
                      help='Generate minimal output and no graphics', default=False)
    parser.add_option('-g', '--ghosts', dest='ghost',
                      help=default('the ghost agent TYPE in the ghostAgents module to use'),
                      metavar = 'TYPE', default='RandomGhost')
    parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
                      help=default('The maximum number of ghosts to use'), default=4)
    parser.add_option('-z', '--zoom', type='float', dest='zoom',
                      help=default('Zoom the size of the graphics window'), default=1.0)
    parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
                      help='Fixes the random seed to always play the same game', default=False)
    parser.add_option('-r', '--recordActions', action='store_true', dest='record',
                      help='Writes game histories to a file (named by the time they were played)', default=False)
    parser.add_option('--replay', dest='gameToReplay',
                      help='A recorded game file (pickle) to replay', default=None)
    parser.add_option('-a','--agentArgs',dest='agentArgs',
                      help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
    parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
                      help=default('How many episodes are training (suppresses output)'), default=0)
    parser.add_option('--frameTime', dest='frameTime', type='float',
                      help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
    parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
                      help='Turns on exception handling and timeouts during games', default=False)
    parser.add_option('--timeout', dest='timeout', type='int',
                      help=default('Maximum length of time an agent can spend computing in a single game'), default=30)

    options, otherjunk = parser.parse_args(argv)
    if len(otherjunk) != 0:
        raise Exception('Command line input not understood: ' + str(otherjunk))
    args = dict()

    # Fix the random seed
    if options.fixRandomSeed: random.seed('PacMan')

    # Choose a layout
    args['layout'] = layout.getLayout( options.layout )
    if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")

    # Choose a Pacman agent
    # Keyboard agents require graphics unless we are replaying a recording.
    noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
    pacmanType = loadAgent(options.pacman, noKeyboard)
    agentOpts = parseAgentArgs(options.agentArgs)
    if options.numTraining > 0:
        args['numTraining'] = options.numTraining
        # Training count is also forwarded to learning agents via agentOpts.
        if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
    pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
    args['pacman'] = pacman

    # Don't display training games
    if 'numTrain' in agentOpts:
        options.numQuiet = int(agentOpts['numTrain'])
        options.numIgnore = int(agentOpts['numTrain'])

    # Choose a ghost agent
    ghostType = loadAgent(options.ghost, noKeyboard)
    args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )]

    # Choose a display format
    if options.quietGraphics:
        import textDisplay
        args['display'] = textDisplay.NullGraphics()
    elif options.textGraphics:
        import textDisplay
        textDisplay.SLEEP_TIME = options.frameTime
        args['display'] = textDisplay.PacmanGraphics()
    else:
        import graphicsDisplay
        args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime)
    args['numGames'] = options.numGames
    args['record'] = options.record
    args['catchExceptions'] = options.catchExceptions
    args['timeout'] = options.timeout

    # Special case: recorded games don't use the runGames method or args structure
    if options.gameToReplay != None:
        print 'Replaying recorded game %s.' % options.gameToReplay
        import cPickle
        f = open(options.gameToReplay)
        try: recorded = cPickle.load(f)
        finally: f.close()
        recorded['display'] = args['display']
        replayGame(**recorded)
        sys.exit(0)

    return args
def loadAgent(pacman, nographics):
    """
    Locate the agent class named ``pacman`` by scanning every directory on
    PYTHONPATH (plus the current directory) for modules whose filename ends
    in 'gents.py', importing each and checking for the attribute.

    :param pacman: name of the agent class to find.
    :param nographics: True when no graphics are available; keyboard agents
        are then rejected.
    :raises Exception: if the agent cannot be found anywhere.
    """
    pythonPathStr = os.path.expandvars("$PYTHONPATH")
    # Windows uses ';' as the path separator, POSIX uses ':'.
    separator = ';' if ';' in pythonPathStr else ':'
    pythonPathDirs = pythonPathStr.split(separator)
    pythonPathDirs.append('.')

    for moduleDir in pythonPathDirs:
        if not os.path.isdir(moduleDir):
            continue
        candidates = [f for f in os.listdir(moduleDir) if f.endswith('gents.py')]
        for modulename in candidates:
            try:
                module = __import__(modulename[:-3])
            except ImportError:
                continue
            if pacman in dir(module):
                if nographics and modulename == 'keyboardAgents.py':
                    raise Exception('Using the keyboard requires graphics (not text display)')
                return getattr(module, pacman)
    raise Exception('The agent ' + pacman + ' is not specified in any *Agents.py.')
def replayGame( layout, actions, display ):
    """
    Play back a recorded game: rebuild a fresh game, then apply the recorded
    actions one by one, updating the display after each move.

    :param layout: the Layout the game was recorded on.
    :param actions: recorded move history; each entry is unpacked into
        generateSuccessor (agent index and action).
    :param display: graphics object implementing initialize/update/finish.
    """
    import pacmanAgents, ghostAgents
    rules = ClassicGameRules()
    # NOTE(review): the agents here appear to be placeholders — playback is
    # driven entirely by the recorded actions, not by the agents' policies.
    agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1) for i in range(layout.getNumGhosts())]
    game = rules.newGame( layout, agents[0], agents[1:], display )
    state = game.state
    display.initialize(state.data)

    for action in actions:
            # Execute the action
        state = state.generateSuccessor( *action )
        # Change the display
        display.update( state.data )
        # Allow for game specific conditions (winning, losing, etc.)
        rules.process(state, game)

    display.finish()
def runGames( layout, pacman, ghosts, display, numGames, record, numTraining = 0, catchExceptions=False, timeout=30 ):
    """
    Run ``numGames`` complete games and return the list of non-training
    Game objects. The first ``numTraining`` games are played quietly with
    no graphics; a summary is printed only when non-training games ran.
    """
    import __main__
    # Expose the display globally; other modules read _display from __main__.
    __main__.__dict__['_display'] = display

    rules = ClassicGameRules(timeout)
    games = []

    for i in range( numGames ):
        beQuiet = i < numTraining
        if beQuiet:
            # Suppress output and graphics
            import textDisplay
            gameDisplay = textDisplay.NullGraphics()
            rules.quiet = True
        else:
            gameDisplay = display
            rules.quiet = False
        game = rules.newGame( layout, pacman, ghosts, gameDisplay, beQuiet, catchExceptions)
        game.run()
        if not beQuiet: games.append(game)

        if record:
            import time, cPickle
            fname = ('recorded-game-%d' % (i + 1)) + '-'.join([str(t) for t in time.localtime()[1:6]])
            # NOTE(review): Python 2 built-in file(); would need open() on Python 3.
            f = file(fname, 'w')
            components = {'layout': layout, 'actions': game.moveHistory}
            cPickle.dump(components, f)
            f.close()

    if (numGames-numTraining) > 0:
        scores = [game.state.getScore() for game in games]
        wins = [game.state.isWin() for game in games]
        winRate = wins.count(True)/ float(len(wins))
        print 'Average Score:', sum(scores) / float(len(scores))
        print 'Scores: ', ', '.join([str(score) for score in scores])
        print 'Win Rate: %d/%d (%.2f)' % (wins.count(True), len(wins), winRate)
        print 'Record: ', ', '.join([ ['Loss', 'Win'][int(w)] for w in wins])

    return games
if __name__ == '__main__':
    """
    The main function called when pacman.py is run
    from the command line:

    > python pacman.py

    See the usage string for more details.

    > python pacman.py --help
    """
    args = readCommand( sys.argv[1:] ) # Get game components based on input
    runGames( **args )

    # Uncomment to profile a run instead of playing normally.
    # import cProfile
    # cProfile.run("runGames( **args )")
    pass
| |
#!python
# -*- coding: utf-8 -*-
"""
Skyspark Client support
"""
import hszinc
from six import string_types
from .session import HaystackSession
from .ops.vendor.skyspark import SkysparkAuthenticateOperation
from .ops.vendor.skyspark_scram import SkysparkScramAuthenticateOperation
from .mixins.vendor.skyspark import evalexpr
class SkysparkHaystackSession(HaystackSession, evalexpr.EvalOpsMixin):
    """
    The SkysparkHaystackSession class implements some base support for
    Skyspark servers using the legacy cookie-based authentication.
    """

    _AUTH_OPERATION = SkysparkAuthenticateOperation

    def __init__(self, uri, username, password, project="", **kwargs):
        """
        Initialise a Skyspark Project Haystack session handler.

        :param uri: Base URI for the Haystack installation.
        :param username: Authentication user name.
        :param password: Authentication password.
        :param project: Skyspark project name
        """
        super(SkysparkHaystackSession, self).__init__(uri, "api/%s" % project, **kwargs)
        self._project = project
        self._username = username
        self._password = password
        self._authenticated = False

    @property
    def is_logged_in(self):
        """
        Return true if the user is logged in.
        """
        return self._authenticated

    # Private methods/properties

    def _on_authenticate_done(self, operation, **kwargs):
        """
        Process the result of an authentication operation. This needs to be
        implemented in the subclass and should, at minimum, set a flag in the
        subclass to indicate the authentication state and clear the _auth_op
        attribute on the base class.
        """
        try:
            # operation.result raises if authentication failed.
            cookies = operation.result
            self._authenticated = True
            self._client.cookies = cookies
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed here.
            self._authenticated = False
            self._client.cookies = None
        finally:
            self._auth_op = None
class SkysparkScramHaystackSession(HaystackSession, evalexpr.EvalOpsMixin):
    """
    The SkysparkScramHaystackSession class implements some base support for
    Skyspark servers that authenticate via SCRAM.
    """

    _AUTH_OPERATION = SkysparkScramAuthenticateOperation

    def __init__(self, uri, username, password, project, http_args=None, **kwargs):
        """
        Initialise a Skyspark Project Haystack session handler.

        :param uri: Base URI for the Haystack installation.
        :param username: Authentication user name.
        :param password: Authentication password.
        :param project: Skyspark project name
        :param http_args: optional dict of extra HTTP client arguments.
        """
        # Skyspark is allergic to requests.Session we must turn it off.
        http_args = http_args or {}
        http_args["requests_session"] = False
        super(SkysparkScramHaystackSession, self).__init__(
            uri, "api/%s" % project, http_args=http_args, **kwargs
        )
        self._username = username
        self._password = password
        self._project = project
        self._authenticated = False
        self._authToken = None
        self._attestKey = None

    @property
    def is_logged_in(self):
        """
        Return true if the user is logged in.
        """
        return self._authenticated

    # Private methods/properties

    # For _get_grid, _post_grid, wrap the superclass version with a version
    # that defaults to exclude_cookies=True. This is because SkySpark gets
    # confused and demands an attestation key if we round-trip its cookies.

    def _get_grid(
        self,
        uri,
        callback,
        expect_format=None,
        cache=False,
        exclude_cookies=True,
        **kwargs
    ):
        """GET a grid, excluding cookies by default (SkySpark quirk)."""
        return super(SkysparkScramHaystackSession, self)._get_grid(
            uri=uri,
            callback=callback,
            expect_format=expect_format,
            cache=cache,
            exclude_cookies=exclude_cookies,
            **kwargs
        )

    def _post_grid(
        self,
        uri,
        grid,
        callback,
        expect_format=None,
        cache=False,
        exclude_cookies=True,
        **kwargs
    ):
        """POST a grid, excluding cookies by default (SkySpark quirk)."""
        return super(SkysparkScramHaystackSession, self)._post_grid(
            uri=uri,
            grid=grid,
            callback=callback,
            expect_format=expect_format,
            cache=cache,
            exclude_cookies=exclude_cookies,
            **kwargs
        )

    def _on_authenticate_done(self, operation, **kwargs):
        """
        Process the result of an authentication operation. This needs to be
        implemented in the subclass and should, at minimum, set a flag in the
        subclass to indicate the authentication state and clear the _auth_op
        attribute on the base class.
        """
        try:
            # operation.result raises if authentication failed.
            op_result = operation.result
            header = op_result["header"]
            self._authenticated = True
            self._client.cookies = None
            self._client.headers = header
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed during authentication.
            self._authenticated = False
            self._client.cookies = None
        finally:
            self._auth_op = None

    def logout(self):
        """close session when leaving context by trick given by Brian Frank
        https://www.skyfoundry.com/forum/topic/5282#c1
        but beware that this is not standard!"""
        # TODO: Rewrite this when a standard way to close sessions is
        # implemented in Skyspark.
        def callback(response):
            # Log whether the server acknowledged the logout request.
            try:
                status_code = response.status_code
            except AttributeError:
                # Response may be an error object without a status code;
                # (dropped the unused `as error` binding from the original).
                status_code = -1
            if status_code != 200:
                self._log.warning("Failed to close skyspark session")
                self._log.warning("status_code={}".format(status_code))
            else:
                self._log.info("You've been properly disconnected")

        self._get("/user/logout", callback, api=False)

    def _on_his_read(self, point, rng, callback, **kwargs):
        """
        Issue a hisRead for ``point`` over the range ``rng`` using POST.

        Skyspark will not accept GET requests for his_read by default
        [ref : https://project-haystack.org/forum/topic/787#c6]: recent
        SkySpark versions disallow GET for potentially non-idempotent
        operations, and POST is the safer alternative in any case (GET with
        side effects is against the HTTP spec and an attack vector when
        cookies are involved).

        :param point: the point (or reference) whose history is requested.
        :param rng: a slice (start/stop), a scalar, or a pre-formatted
            range string passed through as-is.
        """
        if isinstance(rng, slice):
            # Encode "start,stop" with each endpoint zinc-serialised.
            str_rng = ",".join([hszinc.dump_scalar(p) for p in (rng.start, rng.stop)])
        elif not isinstance(rng, string_types):
            str_rng = hszinc.dump_scalar(rng)
        else:
            # No conversion here as this will be added to the grid as-is
            str_rng = rng

        his_grid = hszinc.Grid()
        his_grid.metadata["id"] = self._obj_to_ref(point)
        his_grid.column["id"] = {}
        his_grid.column["range"] = {}
        his_grid.append({"id": self._obj_to_ref(point), "range": str_rng})

        return self._post_grid("hisRead", his_grid, callback, **kwargs)
| |
from __future__ import absolute_import, print_function
"""
Testing import statement with filefinder2
"""
import os
import sys
import unittest
import pytest # we need to use pytest marker to get __package__ to get the proper value, making relative import works
# CAREFUL : it seems this does have side effect in pytest modules and hooks setup.
try:
from ._utils import print_importers, xfail_py2_noff2, xfail_py2_noactive
except ValueError: # "Attempted relative import in non-package" when running standalone
from _utils import print_importers, xfail_py2_noff2, xfail_py2_noactive
#
# Note : we cannot assume anything about import implementation (different python version, different version of pytest)
# => we need to test them all...
#
# We need to test implicit namespace packages PEP 420 (especially for python 2.7)
# Since we rely on it for ros import.
# But we can only test relative package structure
# TODO : depending on the python version we aim to support, we might be able to drop some tests here...
@pytest.mark.usefixtures("importlib")
class TestImplicitNamespace(unittest.TestCase):
    """
    Testing PEP 420 implicit namespace packages.

    Every test imports through ``self.importlib`` — injected by the
    ``importlib`` fixture, which is either the stdlib importlib or
    filefinder2 depending on command-line options (see conftest.py).
    Each test first checks that the target module is not already in
    ``sys.modules``: a pre-loaded module would make ``__import__`` a
    cache hit and the test meaningless (hence the ``--forked`` runs).
    """

    @classmethod
    def setUpClass(cls):
        # we compile the bytecode with the testing python interpreter,
        # so the generated .pyc matches the interpreter running the tests
        import py_compile
        source_py = os.path.join(os.path.dirname(__file__), 'nspkg', 'subpkg', 'bytecode.py')
        dest_pyc = os.path.join(os.path.dirname(__file__), 'nspkg', 'subpkg', 'bytecode.pyc')  # CAREFUL where
        py_compile.compile(source_py, dest_pyc, doraise=True)
        source_py = os.path.join(os.path.dirname(__file__), 'pkg', 'bytecode.py')
        dest_pyc = os.path.join(os.path.dirname(__file__), 'pkg', 'bytecode.pyc')  # CAREFUL where
        py_compile.compile(source_py, dest_pyc, doraise=True)

    def setUp(self):
        # filefinder2 will be imported and activated depending on command line options. see conftest.py
        assert hasattr(self, "importlib")

    # Using __import__
    @xfail_py2_noff2
    def test_importlib_import_relative_pkg(self):
        """Verify that a regular package is importable relatively."""
        print_importers()
        assert __package__
        # need globals to handle relative imports
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.pkg'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.pkg'))
        else:
            pkg = self.importlib.__import__('pkg', globals=globals(), level=1)
            test_pkg = pkg
            self.assertTrue(test_pkg is not None)
            self.assertTrue(test_pkg.TestClassInSubPkg is not None)
            self.assertTrue(callable(test_pkg.TestClassInSubPkg))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(test_pkg)
            else:
                pass

    @xfail_py2_noff2
    def test_importlib_import_relative_pkg_submodule(self):
        """Verify that a submodule of a regular package is importable relatively."""
        print_importers()
        assert __package__
        # need globals to handle relative imports
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.pkg.submodule'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.pkg.submodule'))
        else:
            pkg = self.importlib.__import__('pkg.submodule', globals=globals(), level=1)
            # __import__ returns the top-level package; reach the submodule as an attribute
            test_mod = pkg.submodule
            self.assertTrue(test_mod is not None)
            self.assertTrue(test_mod.TestClassInSubModule is not None)
            self.assertTrue(callable(test_mod.TestClassInSubModule))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(test_mod)
            else:
                pass

    @xfail_py2_noff2
    def test_importlib_import_relative_pkg_bytecode(self):
        """Verify that a bytecode-only module of a regular package is importable relatively."""
        print_importers()
        assert __package__
        # need globals to handle relative imports
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.pkg.bytecode'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.pkg.bytecode'))
        else:
            pkg = self.importlib.__import__('pkg.bytecode', globals=globals(), level=1)
            test_mod = pkg.bytecode
            self.assertTrue(test_mod is not None)
            self.assertTrue(test_mod.TestClassInBytecode is not None)
            self.assertTrue(callable(test_mod.TestClassInBytecode))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(test_mod)
            else:
                pass

    @xfail_py2_noff2
    def test_importlib_import_class_from_relative_pkg(self):
        """Verify that a class is reachable from a relatively imported package."""
        print_importers()
        assert __package__
        # need globals to handle relative imports
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.pkg'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.pkg'))
        else:
            pkg = self.importlib.__import__('pkg', globals=globals(), level=1)
            test_class_in_subpkg = pkg.TestClassInSubPkg
            self.assertTrue(test_class_in_subpkg is not None)
            self.assertTrue(callable(test_class_in_subpkg))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(pkg)
            else:
                pass

    @xfail_py2_noff2
    def test_importlib_import_class_from_relative_pkg_submodule(self):
        """Verify that a class is reachable from a relatively imported submodule."""
        print_importers()
        assert __package__
        # need globals to handle relative imports
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.pkg.submodule'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.pkg.submodule'))
        else:
            pkg = self.importlib.__import__('pkg.submodule', globals=globals(), level=1)
            test_class_in_submodule = pkg.submodule.TestClassInSubModule
            self.assertTrue(test_class_in_submodule is not None)
            self.assertTrue(callable(test_class_in_submodule))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(pkg)
            else:
                pass

    @xfail_py2_noff2
    def test_importlib_import_class_from_relative_pkg_bytecode(self):
        """Verify that a class is reachable from a relatively imported bytecode-only module."""
        print_importers()
        assert __package__
        # need globals to handle relative imports
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.pkg.bytecode'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.pkg.bytecode'))
        else:
            pkg = self.importlib.__import__('pkg.bytecode', globals=globals(), level=1)
            test_class_in_bytecode = pkg.bytecode.TestClassInBytecode
            self.assertTrue(test_class_in_bytecode is not None)
            self.assertTrue(callable(test_class_in_bytecode))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(pkg)
            else:
                pass

    @xfail_py2_noff2
    def test_importlib_import_relative_badpkg_raises(self):
        """Verify that a broken package raises ImportError on relative import."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.badpkg'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.badpkg'))
        else:
            with self.assertRaises(ImportError):
                self.importlib.__import__('badpkg', globals=globals(),
                                          level=1)  # need globals to handle relative imports

    @xfail_py2_noff2
    @xfail_py2_noactive
    def test_importlib_import_relative_ns_subpkg(self):
        """Verify that a namespace subpackage is importable relatively."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.nspkg.subpkg'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.nspkg.subpkg'))
        else:
            nspkg = self.importlib.__import__('nspkg.subpkg', globals=globals(),
                                              level=1)  # need globals to handle relative imports
            test_pkg = nspkg.subpkg
            self.assertTrue(test_pkg is not None)
            self.assertTrue(test_pkg.TestClassInSubPkg is not None)
            self.assertTrue(callable(test_pkg.TestClassInSubPkg))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(test_pkg)
            else:
                pass

    @xfail_py2_noff2
    @xfail_py2_noactive
    def test_importlib_import_relative_ns_subpkg_submodule(self):
        """Verify that a submodule of a namespace subpackage is importable relatively."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.nspkg.subpkg.submodule'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.nspkg.subpkg.submodule'))
        else:
            nspkg = self.importlib.__import__('nspkg.subpkg.submodule', globals=globals(),
                                              level=1)  # need globals to handle relative imports
            test_mod = nspkg.subpkg.submodule
            self.assertTrue(test_mod is not None)
            self.assertTrue(test_mod.TestClassInSubModule is not None)
            self.assertTrue(callable(test_mod.TestClassInSubModule))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(test_mod)
            else:
                pass

    @xfail_py2_noff2
    @xfail_py2_noactive
    def test_importlib_import_relative_ns_subpkg_bytecode(self):
        """Verify that a bytecode-only module of a namespace subpackage is importable relatively."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.nspkg.subpkg.bytecode'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.nspkg.subpkg.bytecode'))
        else:
            nspkg = self.importlib.__import__('nspkg.subpkg.bytecode', globals=globals(),
                                              level=1)  # need globals to handle relative imports
            test_mod = nspkg.subpkg.bytecode
            self.assertTrue(test_mod is not None)
            self.assertTrue(test_mod.TestClassInBytecode is not None)
            self.assertTrue(callable(test_mod.TestClassInBytecode))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(test_mod)
            else:
                pass

    @xfail_py2_noff2
    @xfail_py2_noactive
    def test_importlib_import_class_from_relative_ns_subpkg(self):
        """Verify that a class is reachable from a relatively imported namespace subpackage."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.nspkg.subpkg'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.nspkg.subpkg'))
        else:
            nspkg = self.importlib.__import__('nspkg.subpkg', globals=globals(),
                                              level=1)  # need globals to handle relative imports
            test_class_in_subpkg = nspkg.subpkg.TestClassInSubPkg
            self.assertTrue(test_class_in_subpkg is not None)
            self.assertTrue(callable(test_class_in_subpkg))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(nspkg)
            else:
                pass

    @xfail_py2_noff2
    @xfail_py2_noactive
    def test_importlib_import_class_from_relative_ns_subpkg_submodule(self):
        """Verify that a class is reachable from a relatively imported namespace-subpackage submodule."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.nspkg.subpkg.submodule'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.nspkg.subpkg.submodule'))
        else:
            nspkg = self.importlib.__import__('nspkg.subpkg.submodule', globals=globals(),
                                              level=1)  # need globals to handle relative imports
            test_class_in_submodule = nspkg.subpkg.submodule.TestClassInSubModule
            self.assertTrue(test_class_in_submodule is not None)
            self.assertTrue(callable(test_class_in_submodule))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(nspkg)
            else:
                pass

    @xfail_py2_noff2
    @xfail_py2_noactive
    def test_importlib_import_class_from_relative_ns_subpkg_bytecode(self):
        """Verify that a class is reachable from a relatively imported namespace-subpackage bytecode module."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.nspkg.subpkg.bytecode'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.nspkg.subpkg.bytecode'))
        else:
            nspkg = self.importlib.__import__('nspkg.subpkg.bytecode', globals=globals(),
                                              level=1)  # need globals to handle relative imports
            test_class_in_bytecode = nspkg.subpkg.bytecode.TestClassInBytecode
            self.assertTrue(test_class_in_bytecode is not None)
            self.assertTrue(callable(test_class_in_bytecode))
            # TODO : implement some differences and check we get them...
            if hasattr(self.importlib, 'reload'):  # recent version of importlib
                # attempting to reload
                self.importlib.reload(nspkg)
            else:
                pass

    @xfail_py2_noff2
    def test_importlib_import_relative_nonnspkg_raises(self):
        """Verify that a broken namespace package is not importable relatively."""
        print_importers()
        assert __package__
        # __import__ checks sys.modules by itself
        # but the test is not reflecting anything if we use the already loaded module.
        if sys.modules.get(__package__ + '.bad_nspkg.bad_subpkg'):
            self.fail("module {0} previously loaded. You might need to fix your tests to run with --forked.".format(__package__ + '.bad_nspkg.bad_subpkg'))
        else:
            with self.assertRaises(ImportError):
                self.importlib.__import__('bad_nspkg.bad_subpkg', globals=globals(), level=1)  # need globals to handle relative imports
if __name__ == '__main__':
    # pytest is already imported at module level; the local re-import was
    # redundant and has been removed. Each pass runs --forked so import
    # state from one run cannot leak into the next.
    # testing current python capabilities
    pytest.main(['-v', '-s', '--noff2', '-x', __file__, '--forked'])
    # testing importing ff2 provides basic importlib API without disturbing anything
    pytest.main(['-v', '-s', '--noactive', '-x', __file__, '--forked'])
    # testing ff2 features
    pytest.main(['-v', '-s', '-x', __file__, '--forked'])
| |
"""The tests for the TCP sensor platform."""
import socket
import unittest
from copy import copy
from uuid import uuid4
from unittest.mock import patch, Mock
from tests.common import (get_test_home_assistant, assert_setup_component)
from homeassistant.setup import setup_component
from homeassistant.components.sensor import tcp
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.template import Template
# A complete, valid configuration exercising every supported option of the
# TCP sensor platform. Non-default values are used for options that have
# defaults so that override behavior is observable.
TEST_CONFIG = {
    'sensor': {
        'platform': 'tcp',
        tcp.CONF_NAME: 'test_name',
        tcp.CONF_HOST: 'test_host',
        tcp.CONF_PORT: 12345,
        tcp.CONF_TIMEOUT: tcp.DEFAULT_TIMEOUT + 1,
        tcp.CONF_PAYLOAD: 'test_payload',
        tcp.CONF_UNIT_OF_MEASUREMENT: 'test_unit',
        tcp.CONF_VALUE_TEMPLATE: Template('test_template'),
        tcp.CONF_VALUE_ON: 'test_on',
        tcp.CONF_BUFFER_SIZE: tcp.DEFAULT_BUFFER_SIZE + 1
    },
}
# Optional config keys mapped to the default value the platform must apply
# when the key is omitted from the configuration.
KEYS_AND_DEFAULTS = {
    tcp.CONF_TIMEOUT: tcp.DEFAULT_TIMEOUT,
    tcp.CONF_UNIT_OF_MEASUREMENT: None,
    tcp.CONF_VALUE_TEMPLATE: None,
    tcp.CONF_VALUE_ON: None,
    tcp.CONF_BUFFER_SIZE: tcp.DEFAULT_BUFFER_SIZE
}
class TestTCPSensor(unittest.TestCase):
    """Test the TCP Sensor.

    Tests must not mutate the module-level TEST_CONFIG: it is shared by
    every test method, so any change leaks across tests and makes the
    suite order-dependent.
    """

    def setup_method(self, method):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()

    def teardown_method(self, method):
        """Stop everything that was started."""
        self.hass.stop()

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_setup_platform_valid_config(self, mock_update):
        """Check a valid configuration and call add_devices with sensor."""
        with assert_setup_component(0, 'sensor'):
            assert setup_component(self.hass, 'sensor', TEST_CONFIG)
        add_devices = Mock()
        tcp.setup_platform(None, TEST_CONFIG['sensor'], add_devices)
        assert add_devices.called
        assert isinstance(add_devices.call_args[0][0][0], tcp.TcpSensor)

    def test_setup_platform_invalid_config(self):
        """Check an invalid configuration."""
        with assert_setup_component(0):
            assert setup_component(self.hass, 'sensor', {
                'sensor': {
                    'platform': 'tcp',
                    'porrt': 1234,  # deliberate typo: must fail validation
                }
            })

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_name(self, mock_update):
        """Return the name if set in the configuration."""
        sensor = tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        assert sensor.name == TEST_CONFIG['sensor'][tcp.CONF_NAME]

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_name_not_set(self, mock_update):
        """Return the superclass name property if not set in configuration."""
        config = copy(TEST_CONFIG['sensor'])
        del config[tcp.CONF_NAME]
        entity = Entity()
        sensor = tcp.TcpSensor(self.hass, config)
        assert sensor.name == entity.name

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_state(self, mock_update):
        """Return the contents of _state."""
        sensor = tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        uuid = str(uuid4())
        sensor._state = uuid
        assert sensor.state == uuid

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_unit_of_measurement(self, mock_update):
        """Return the configured unit of measurement."""
        sensor = tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        assert sensor.unit_of_measurement == \
            TEST_CONFIG['sensor'][tcp.CONF_UNIT_OF_MEASUREMENT]

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_config_valid_keys(self, *args):
        """Store valid keys in _config."""
        sensor = tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        # BUGFIX: the original deleted 'platform' from the shared
        # TEST_CONFIG dict, leaking the mutation into every later test.
        # Skip the key instead of mutating the module constant.
        for key in TEST_CONFIG['sensor']:
            if key == 'platform':
                continue
            assert key in sensor._config

    def test_validate_config_valid_keys(self):
        """Return True when provided with the correct keys."""
        with assert_setup_component(0, 'sensor'):
            assert setup_component(self.hass, 'sensor', TEST_CONFIG)

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_config_invalid_keys(self, mock_update):
        """Shouldn't store invalid keys in _config."""
        config = copy(TEST_CONFIG['sensor'])
        config.update({
            'a': 'test_a',
            'b': 'test_b',
            'c': 'test_c'
        })
        sensor = tcp.TcpSensor(self.hass, config)
        for invalid_key in 'abc':
            assert invalid_key not in sensor._config

    def test_validate_config_invalid_keys(self):
        """Test with invalid keys plus some extra."""
        config = copy(TEST_CONFIG['sensor'])
        config.update({
            'a': 'test_a',
            'b': 'test_b',
            'c': 'test_c'
        })
        with assert_setup_component(0, 'sensor'):
            assert setup_component(self.hass, 'sensor', {'tcp': config})

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_config_uses_defaults(self, mock_update):
        """Check if defaults were set."""
        config = copy(TEST_CONFIG['sensor'])
        for key in KEYS_AND_DEFAULTS:
            del config[key]
        with assert_setup_component(1) as result_config:
            assert setup_component(self.hass, 'sensor', {
                'sensor': config,
            })
        sensor = tcp.TcpSensor(self.hass, result_config['sensor'][0])
        for key, default in KEYS_AND_DEFAULTS.items():
            assert sensor._config[key] == default

    def test_validate_config_missing_defaults(self):
        """Return True when defaulted keys are not provided."""
        config = copy(TEST_CONFIG['sensor'])
        for key in KEYS_AND_DEFAULTS:
            del config[key]
        with assert_setup_component(0, 'sensor'):
            assert setup_component(self.hass, 'sensor', {'tcp': config})

    def test_validate_config_missing_required(self):
        """Return False when required config items are missing."""
        for key in TEST_CONFIG['sensor']:
            if key in KEYS_AND_DEFAULTS:
                continue  # keys with defaults are not required
            config = copy(TEST_CONFIG['sensor'])
            del config[key]
            with assert_setup_component(0, 'sensor'):
                assert setup_component(self.hass, 'sensor', {'tcp': config})

    @patch('homeassistant.components.sensor.tcp.TcpSensor.update')
    def test_init_calls_update(self, mock_update):
        """Call update() method during __init__()."""
        # BUGFIX: pass the platform config dict like every other test,
        # not the whole {'sensor': ...} wrapper.
        tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        assert mock_update.called

    @patch('socket.socket')
    @patch('select.select', return_value=(True, False, False))
    def test_update_connects_to_host_and_port(self, mock_select, mock_socket):
        """Connect to the configured host and port."""
        tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        mock_socket = mock_socket().__enter__()
        assert mock_socket.connect.mock_calls[0][1] == ((
            TEST_CONFIG['sensor'][tcp.CONF_HOST],
            TEST_CONFIG['sensor'][tcp.CONF_PORT]),)

    @patch('socket.socket.connect', side_effect=socket.error())
    def test_update_returns_if_connecting_fails(self, *args):
        """Return if connecting to host fails."""
        with patch('homeassistant.components.sensor.tcp.TcpSensor.update'):
            sensor = tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        assert sensor.update() is None

    @patch('socket.socket.connect')
    @patch('socket.socket.send', side_effect=socket.error())
    def test_update_returns_if_sending_fails(self, *args):
        """Return if sending fails."""
        with patch('homeassistant.components.sensor.tcp.TcpSensor.update'):
            sensor = tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        assert sensor.update() is None

    @patch('socket.socket.connect')
    @patch('socket.socket.send')
    @patch('select.select', return_value=(False, False, False))
    def test_update_returns_if_select_fails(self, *args):
        """Return if select fails to return a socket."""
        with patch('homeassistant.components.sensor.tcp.TcpSensor.update'):
            sensor = tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        assert sensor.update() is None

    @patch('socket.socket')
    @patch('select.select', return_value=(True, False, False))
    def test_update_sends_payload(self, mock_select, mock_socket):
        """Send the configured payload as bytes."""
        tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        mock_socket = mock_socket().__enter__()
        mock_socket.send.assert_called_with(
            TEST_CONFIG['sensor'][tcp.CONF_PAYLOAD].encode()
        )

    @patch('socket.socket')
    @patch('select.select', return_value=(True, False, False))
    def test_update_calls_select_with_timeout(self, mock_select, mock_socket):
        """Provide the timeout argument to select."""
        tcp.TcpSensor(self.hass, TEST_CONFIG['sensor'])
        mock_socket = mock_socket().__enter__()
        mock_select.assert_called_with(
            [mock_socket], [], [], TEST_CONFIG['sensor'][tcp.CONF_TIMEOUT])

    @patch('socket.socket')
    @patch('select.select', return_value=(True, False, False))
    def test_update_receives_packet_and_sets_as_state(
            self, mock_select, mock_socket):
        """Test the response from the socket and set it as the state."""
        test_value = 'test_value'
        mock_socket = mock_socket().__enter__()
        mock_socket.recv.return_value = test_value.encode()
        config = copy(TEST_CONFIG['sensor'])
        del config[tcp.CONF_VALUE_TEMPLATE]
        sensor = tcp.TcpSensor(self.hass, config)
        assert sensor._state == test_value

    @patch('socket.socket')
    @patch('select.select', return_value=(True, False, False))
    def test_update_renders_value_in_template(self, mock_select, mock_socket):
        """Render the value in the provided template."""
        test_value = 'test_value'
        mock_socket = mock_socket().__enter__()
        mock_socket.recv.return_value = test_value.encode()
        config = copy(TEST_CONFIG['sensor'])
        config[tcp.CONF_VALUE_TEMPLATE] = Template('{{ value }} {{ 1+1 }}')
        sensor = tcp.TcpSensor(self.hass, config)
        assert sensor._state == '%s 2' % test_value

    @patch('socket.socket')
    @patch('select.select', return_value=(True, False, False))
    def test_update_returns_if_template_render_fails(
            self, mock_select, mock_socket):
        """Return None if rendering the template fails."""
        test_value = 'test_value'
        mock_socket = mock_socket().__enter__()
        mock_socket.recv.return_value = test_value.encode()
        config = copy(TEST_CONFIG['sensor'])
        config[tcp.CONF_VALUE_TEMPLATE] = Template("{{ this won't work")
        sensor = tcp.TcpSensor(self.hass, config)
        assert sensor.update() is None
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Copyright (c) 2012 - 2014, GPy authors (see AUTHORS.txt).
# Copyright (c) 2014, James Hensman, Max Zwiessele
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from __future__ import print_function
import os
import sys
from setuptools import setup, Extension
import numpy as np
import codecs
def read(fname):
    """Return the full contents of *fname* decoded as latin-1."""
    handle = codecs.open(fname, 'r', 'latin')
    try:
        return handle.read()
    finally:
        handle.close()
def read_to_rst(fname):
    """Convert *fname* from Markdown to reST via pypandoc when available.

    Falls back to the raw file contents when pypandoc is not installed.
    """
    try:
        import pypandoc
    except ImportError:
        return read(fname)
    rstname = "{}.{}".format(os.path.splitext(fname)[0], 'rst')
    pypandoc.convert(read(fname), 'rst', format='md', outputfile=rstname)
    with open(rstname, 'r') as f:
        return f.read()
# Short PyPI long_description; full instructions live on the GitHub homepage.
desc = """
Please refer to the github homepage for detailed instructions on installation and usage.
"""
# Execute GPy/__version__.py in an isolated namespace to obtain __version__
# without importing the (not yet built) GPy package itself.
version_dummy = {}
exec(read('GPy/__version__.py'), version_dummy)
__version__ = version_dummy['__version__']
del version_dummy
# Mac OS X Clang doesn't support OpenMP at the current time.
# This detects if we are building on a Mac.
def ismac():
    """Return True when the build host is macOS (sys.platform == 'darwin...')."""
    return sys.platform.startswith('darwin')
if ismac():
    # Clang on macOS has no OpenMP support: optimize only.
    compile_flags = [ '-O3', ]
    link_args = []
else:
    # Elsewhere (gcc) enable OpenMP and link against libgomp.
    compile_flags = [ '-fopenmp', '-O3']
    link_args = ['-lgomp' ]
# Pre-cythonized C extension modules. All need the NumPy headers; only the
# first two are linked against OpenMP (see link_args above).
ext_mods = [Extension(name='GPy.kern.src.stationary_cython',
                      sources=['GPy/kern/src/stationary_cython.c',
                               'GPy/kern/src/stationary_utils.c'],
                      include_dirs=[np.get_include(),'.'],
                      extra_compile_args=compile_flags,
                      extra_link_args = link_args),
            Extension(name='GPy.util.choleskies_cython',
                      sources=['GPy/util/choleskies_cython.c'],
                      include_dirs=[np.get_include(),'.'],
                      extra_link_args = link_args,
                      extra_compile_args=compile_flags),
            Extension(name='GPy.util.linalg_cython',
                      sources=['GPy/util/linalg_cython.c'],
                      include_dirs=[np.get_include(),'.'],
                      extra_compile_args=compile_flags),
            Extension(name='GPy.kern.src.coregionalize_cython',
                      sources=['GPy/kern/src/coregionalize_cython.c'],
                      include_dirs=[np.get_include(),'.'],
                      extra_compile_args=compile_flags),
            Extension(name='GPy.models.state_space_cython',
                      sources=['GPy/models/state_space_cython.c'],
                      include_dirs=[np.get_include(),'.'],
                      extra_compile_args=compile_flags)]
# Package metadata and build configuration for the GPy distribution.
setup(name = 'GPy',
      version = __version__,
      author = read_to_rst('AUTHORS.txt'),
      author_email = "gpy.authors@gmail.com",
      description = ("The Gaussian Process Toolbox"),
      long_description = desc,
      license = "BSD 3-clause",
      keywords = "machine-learning gaussian-processes kernels",
      url = "http://sheffieldml.github.com/GPy/",
      download_url='https://github.com/SheffieldML/GPy/',
      ext_modules = ext_mods,
      packages = ["GPy",
                  "GPy.core",
                  "GPy.core.parameterization",
                  "GPy.kern",
                  "GPy.kern.src",
                  "GPy.kern.src.psi_comp",
                  "GPy.models",
                  "GPy.inference",
                  "GPy.inference.optimization",
                  "GPy.inference.mcmc",
                  "GPy.inference.latent_function_inference",
                  "GPy.likelihoods",
                  "GPy.mappings",
                  "GPy.examples",
                  "GPy.testing",
                  "GPy.util",
                  "GPy.plotting",
                  "GPy.plotting.gpy_plot",
                  "GPy.plotting.matplot_dep",
                  "GPy.plotting.matplot_dep.controllers",
                  "GPy.plotting.plotly_dep",
                  ],
      package_dir={'GPy': 'GPy'},
      # Data files are picked up from MANIFEST via include_package_data;
      # the explicit package_data/data_files lists are kept for reference.
      #package_data = {'GPy': ['defaults.cfg', 'installation.cfg',
      #                        'util/data_resources.json',
      #                        'util/football_teams.json',
      #                        'testing/plotting_tests/baseline/*.png'
      #                        ]},
      #data_files=[('GPy/testing/plotting_tests/baseline', 'testing/plotting_tests/baseline/*.png'),
      #            ('GPy/testing/', 'GPy/testing/pickle_test.pickle'),
      #            ],
      include_package_data = True,
      py_modules = ['GPy.__init__'],
      test_suite = 'GPy.testing',
      setup_requires = ['numpy>=1.7'],
      install_requires = ['numpy>=1.7', 'scipy>=0.16', 'six', 'paramz>=0.7.4'],
      extras_require = {'docs':['sphinx'],
                        'optional':['mpi4py',
                                    'ipython>=4.0.0',
                                    ],
                        'plotting':['matplotlib >= 1.3',
                                    'plotly >= 1.8.6'],
                        'notebook':['jupyter_client >= 4.0.6',
                                    'ipywidgets >= 4.0.3',
                                    'ipykernel >= 4.1.0',
                                    'notebook >= 4.0.5',
                                    ],
                        },
      classifiers=['License :: OSI Approved :: BSD License',
                   'Natural Language :: English',
                   'Operating System :: MacOS :: MacOS X',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: POSIX :: Linux',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3.5',
                   'Programming Language :: Python :: 3.6',
                   'Framework :: IPython',
                   'Intended Audience :: Science/Research',
                   'Intended Audience :: Developers',
                   'Topic :: Software Development',
                   'Topic :: Software Development :: Libraries :: Python Modules',
                   ]
      )
# Check config files and settings: ensure a user config file exists,
# migrating the legacy ~/.gpy_user.cfg location when present, otherwise
# seeding the new location with the bundled installation.cfg stub.
local_file = os.path.abspath(os.path.join(os.path.dirname(__file__), 'GPy', 'installation.cfg'))
home = os.getenv('HOME') or os.getenv('USERPROFILE')
user_file = os.path.join(home, '.config', 'GPy', 'user.cfg')
print("")
try:
    if not os.path.exists(user_file):
        # Does an old config exist?
        old_user_file = os.path.join(home, '.gpy_user.cfg')
        if os.path.exists(old_user_file):
            # Move it to new location:
            print("GPy: Found old config file, moving to new location {}".format(user_file))
            if not os.path.exists(os.path.dirname(user_file)):
                os.makedirs(os.path.dirname(user_file))
            os.rename(old_user_file, user_file)
        else:
            # No config file exists, save informative stub to user config folder:
            print("GPy: Saving user configuration file to {}".format(user_file))
            if not os.path.exists(os.path.dirname(user_file)):
                os.makedirs(os.path.dirname(user_file))
            with open(user_file, 'w') as f:
                with open(local_file, 'r') as l:
                    f.write(l.read())
    else:
        print("GPy: User configuration file at location {}".format(user_file))
except Exception:
    # Best effort only: never fail the install over a config file. The
    # original bare `except:` also swallowed SystemExit/KeyboardInterrupt,
    # which is why this is narrowed to Exception.
    print("GPy: Could not write user configuration file {}".format(user_file))
| |
"""Support for the Fibaro devices."""
import logging
from collections import defaultdict
from typing import Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_ARMED, ATTR_BATTERY_LEVEL, CONF_DEVICE_CLASS, CONF_EXCLUDE,
CONF_ICON, CONF_PASSWORD, CONF_URL, CONF_USERNAME,
CONF_WHITE_VALUE, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert, slugify
_LOGGER = logging.getLogger(__name__)

# Extra state attributes exposed by energy-reporting Fibaro devices.
ATTR_CURRENT_ENERGY_KWH = 'current_energy_kwh'
ATTR_CURRENT_POWER_W = 'current_power_w'

# Configuration option keys.
CONF_COLOR = 'color'
CONF_DEVICE_CONFIG = 'device_config'
CONF_DIMMING = 'dimming'
CONF_GATEWAYS = 'gateways'
CONF_PLUGINS = 'plugins'
CONF_RESET_COLOR = 'reset_color'

DOMAIN = 'fibaro'

# hass.data keys used to share controllers/devices with the platforms.
FIBARO_CONTROLLERS = 'fibaro_controllers'
FIBARO_DEVICES = 'fibaro_devices'
# Home Assistant platforms this integration can create entities for.
FIBARO_COMPONENTS = ['binary_sensor', 'climate', 'cover', 'light',
                     'scene', 'sensor', 'switch']

# Maps a Fibaro device type string to the Home Assistant platform that
# should handle it.
FIBARO_TYPEMAP = {
    'com.fibaro.multilevelSensor': "sensor",
    'com.fibaro.binarySwitch': 'switch',
    'com.fibaro.multilevelSwitch': 'switch',
    'com.fibaro.FGD212': 'light',
    'com.fibaro.FGR': 'cover',
    'com.fibaro.doorSensor': 'binary_sensor',
    'com.fibaro.doorWindowSensor': 'binary_sensor',
    'com.fibaro.FGMS001': 'binary_sensor',
    'com.fibaro.heatDetector': 'binary_sensor',
    'com.fibaro.lifeDangerSensor': 'binary_sensor',
    'com.fibaro.smokeSensor': 'binary_sensor',
    'com.fibaro.remoteSwitch': 'switch',
    'com.fibaro.sensor': 'sensor',
    'com.fibaro.colorController': 'light',
    'com.fibaro.securitySensor': 'binary_sensor',
    'com.fibaro.hvac': 'climate',
    'com.fibaro.setpoint': 'climate',
    'com.fibaro.FGT001': 'climate',
    'com.fibaro.thermostatDanfoss': 'climate'
}

# Per-device overrides a user may supply under device_config.
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema({
    vol.Optional(CONF_DIMMING): cv.boolean,
    vol.Optional(CONF_COLOR): cv.boolean,
    vol.Optional(CONF_WHITE_VALUE): cv.boolean,
    vol.Optional(CONF_RESET_COLOR): cv.boolean,
    vol.Optional(CONF_DEVICE_CLASS): cv.string,
    vol.Optional(CONF_ICON): cv.string,
})

FIBARO_ID_LIST_SCHEMA = vol.Schema([cv.string])

# Configuration for a single Home Center gateway.
GATEWAY_CONFIG = vol.Schema({
    vol.Required(CONF_PASSWORD): cv.string,
    vol.Required(CONF_USERNAME): cv.string,
    vol.Required(CONF_URL): cv.url,
    vol.Optional(CONF_PLUGINS, default=False): cv.boolean,
    vol.Optional(CONF_EXCLUDE, default=[]): FIBARO_ID_LIST_SCHEMA,
    vol.Optional(CONF_DEVICE_CONFIG, default={}):
        vol.Schema({cv.string: DEVICE_CONFIG_SCHEMA_ENTRY})
}, extra=vol.ALLOW_EXTRA)

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_GATEWAYS): vol.All(cv.ensure_list, [GATEWAY_CONFIG]),
    })
}, extra=vol.ALLOW_EXTRA)
class FibaroController():
    """Handle one connection to a Fibaro Home Center hub.

    Reads the room/device/scene inventories through fiblary3 and
    dispatches pushed state changes to registered entity callbacks.
    """
    def __init__(self, config):
        """Initialize the Fibaro controller."""
        # Imported lazily so fiblary3 is only required when configured.
        from fiblary3.client.v4.client import Client as FibaroClient
        self._client = FibaroClient(
            config[CONF_URL], config[CONF_USERNAME], config[CONF_PASSWORD])
        self._scene_map = None
        # Whether to import devices from plugins
        self._import_plugins = config[CONF_PLUGINS]
        self._device_config = config[CONF_DEVICE_CONFIG]
        self._room_map = None  # Mapping roomId to room object
        self._device_map = None  # Mapping deviceId to device object
        self.fibaro_devices = None  # List of devices by type
        self._callbacks = {}  # Update value callbacks by deviceId
        self._state_handler = None  # Fiblary's StateHandler object
        self._excluded_devices = config[CONF_EXCLUDE]
        self.hub_serial = None  # Unique serial number of the hub
    def connect(self):
        """Start the communication with the Fibaro controller.

        Returns True on success, False on connection or login failure.
        """
        try:
            login = self._client.login.get()
            info = self._client.info.get()
            self.hub_serial = slugify(info.serialNumber)
        # Connection problems surface here as AssertionError.
        except AssertionError:
            _LOGGER.error("Can't connect to Fibaro HC. "
                          "Please check URL.")
            return False
        if login is None or login.status is False:
            _LOGGER.error("Invalid login for Fibaro HC. "
                          "Please check username and password")
            return False
        self._room_map = {room.id: room for room in self._client.rooms.list()}
        self._read_devices()
        self._read_scenes()
        return True
    def enable_state_handler(self):
        """Start StateHandler thread for monitoring updates."""
        from fiblary3.client.v4.client import StateHandler
        self._state_handler = StateHandler(self._client, self._on_state_change)
    def disable_state_handler(self):
        """Stop StateHandler thread used for monitoring updates."""
        self._state_handler.stop()
        self._state_handler = None
    def _on_state_change(self, state):
        """Handle change report received from the HomeCenter.

        Mirrors each changed property into the cached device object and
        invokes every registered callback at most once per report.
        """
        callback_set = set()
        for change in state.get('changes', []):
            try:
                dev_id = change.pop('id')
                # Ignore updates for devices we did not import.
                if dev_id not in self._device_map.keys():
                    continue
                device = self._device_map[dev_id]
                for property_name, value in change.items():
                    # 'log'/'logTemp' are informational, not state.
                    if property_name == 'log':
                        if value and value != "transfer OK":
                            _LOGGER.debug("LOG %s: %s",
                                          device.friendly_name, value)
                        continue
                    if property_name == 'logTemp':
                        continue
                    if property_name in device.properties:
                        device.properties[property_name] = \
                            value
                        _LOGGER.debug("<- %s.%s = %s", device.ha_id,
                                      property_name, str(value))
                    else:
                        _LOGGER.warning("%s.%s not found", device.ha_id,
                                        property_name)
                if dev_id in self._callbacks:
                    callback_set.add(dev_id)
            except (ValueError, KeyError):
                pass
        # Notify each affected entity once, after all changes applied.
        for item in callback_set:
            self._callbacks[item]()
    def register(self, device_id, callback):
        """Register device with a callback for updates."""
        self._callbacks[device_id] = callback
    def get_children(self, device_id):
        """Get a list of child devices."""
        return [
            device for device in self._device_map.values()
            if device.parentId == device_id]
    def get_siblings(self, device_id):
        """Get the siblings of a device."""
        return self.get_children(self._device_map[device_id].parentId)
    @staticmethod
    def _map_device_to_type(device):
        """Map device to HA device type."""
        # Use our lookup table to identify device type
        device_type = None
        if 'type' in device:
            device_type = FIBARO_TYPEMAP.get(device.type)
        if device_type is None and 'baseType' in device:
            device_type = FIBARO_TYPEMAP.get(device.baseType)
        # We can also identify device type by its capabilities
        if device_type is None:
            if 'setBrightness' in device.actions:
                device_type = 'light'
            elif 'turnOn' in device.actions:
                device_type = 'switch'
            elif 'open' in device.actions:
                device_type = 'cover'
            elif 'value' in device.properties:
                if device.properties.value in ('true', 'false'):
                    device_type = 'binary_sensor'
                else:
                    device_type = 'sensor'
        # Switches that control lights should show up as lights
        if device_type == 'switch' and \
                device.properties.get('isLight', 'false') == 'true':
            device_type = 'light'
        return device_type
    def _read_scenes(self):
        """Read visible scenes and register them as 'scene' devices."""
        scenes = self._client.scenes.list()
        self._scene_map = {}
        for device in scenes:
            if not device.visible:
                continue
            device.fibaro_controller = self
            # roomID 0 means the scene is not assigned to a room.
            if device.roomID == 0:
                room_name = 'Unknown'
            else:
                room_name = self._room_map[device.roomID].name
            device.room_name = room_name
            device.friendly_name = '{} {}'.format(room_name, device.name)
            device.ha_id = 'scene_{}_{}_{}'.format(
                slugify(room_name), slugify(device.name), device.id)
            device.unique_id_str = "{}.scene.{}".format(
                self.hub_serial, device.id)
            self._scene_map[device.id] = device
            self.fibaro_devices['scene'].append(device)
    def _read_devices(self):
        """Read and process the device list."""
        devices = self._client.devices.list()
        self._device_map = {}
        self.fibaro_devices = defaultdict(list)
        last_climate_parent = None
        for device in devices:
            try:
                device.fibaro_controller = self
                if device.roomID == 0:
                    room_name = 'Unknown'
                else:
                    room_name = self._room_map[device.roomID].name
                device.room_name = room_name
                device.friendly_name = room_name + ' ' + device.name
                device.ha_id = '{}_{}_{}'.format(
                    slugify(room_name), slugify(device.name), device.id)
                # Skip disabled devices, excluded ids, and (unless the
                # user opted in) devices provided by plugins.
                if device.enabled and \
                        ('isPlugin' not in device or
                         (not device.isPlugin or self._import_plugins)) and \
                        device.ha_id not in self._excluded_devices:
                    device.mapped_type = self._map_device_to_type(device)
                    device.device_config = \
                        self._device_config.get(device.ha_id, {})
                else:
                    device.mapped_type = None
                dtype = device.mapped_type
                if dtype:
                    device.unique_id_str = "{}.{}".format(
                        self.hub_serial, device.id)
                    self._device_map[device.id] = device
                    if dtype != 'climate':
                        self.fibaro_devices[dtype].append(device)
                    else:
                        # if a sibling of this has been added, skip this one
                        # otherwise add the first visible device in the group
                        # which is a hack, but solves a problem with FGT having
                        # hidden compatibility devices before the real device
                        if last_climate_parent != device.parentId and \
                            device.visible:
                            self.fibaro_devices[dtype].append(device)
                            last_climate_parent = device.parentId
                _LOGGER.debug("%s (%s, %s) -> %s %s",
                              device.ha_id, device.type,
                              device.baseType, dtype,
                              str(device))
            except (KeyError, ValueError):
                pass
def setup(hass, base_config):
    """Set up the Fibaro Component."""
    def stop_fibaro(event):
        """Stop Fibaro Thread."""
        _LOGGER.info("Shutting down Fibaro connection")
        for controller in hass.data[FIBARO_CONTROLLERS].values():
            controller.disable_state_handler()

    # One bucket per supported platform, filled from every gateway.
    hass.data[FIBARO_CONTROLLERS] = {}
    hass.data[FIBARO_DEVICES] = {
        component: [] for component in FIBARO_COMPONENTS}

    for gateway_conf in base_config[DOMAIN][CONF_GATEWAYS]:
        controller = FibaroController(gateway_conf)
        if not controller.connect():
            continue
        hass.data[FIBARO_CONTROLLERS][controller.hub_serial] = controller
        for component in FIBARO_COMPONENTS:
            hass.data[FIBARO_DEVICES][component].extend(
                controller.fibaro_devices[component])

    # Abort if no gateway could be reached.
    if not hass.data[FIBARO_CONTROLLERS]:
        return False

    for component in FIBARO_COMPONENTS:
        discovery.load_platform(hass, component, DOMAIN, {}, base_config)
    for controller in hass.data[FIBARO_CONTROLLERS].values():
        controller.enable_state_handler()
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_fibaro)
    return True
class FibaroDevice(Entity):
    """Representation of a Fibaro device entity.

    Base class for the platform-specific Fibaro entities.  It forwards
    actions to the Home Center through the controller and mirrors
    pushed state updates into Home Assistant.
    """

    def __init__(self, fibaro_device):
        """Initialize the device."""
        self.fibaro_device = fibaro_device
        self.controller = fibaro_device.fibaro_controller
        self._name = fibaro_device.friendly_name
        self.ha_id = fibaro_device.ha_id

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        # Subscribe to push updates for this device id.
        self.controller.register(self.fibaro_device.id, self._update_callback)

    def _update_callback(self):
        """Update the state."""
        self.schedule_update_ha_state(True)

    @property
    def level(self):
        """Get the level of Fibaro device, or None if it has no value."""
        if 'value' in self.fibaro_device.properties:
            return self.fibaro_device.properties.value
        return None

    @property
    def level2(self):
        """Get the tilt level of Fibaro device, or None if absent."""
        if 'value2' in self.fibaro_device.properties:
            return self.fibaro_device.properties.value2
        return None

    def dont_know_message(self, action):
        """Make a warning in case we don't know how to perform an action."""
        # Bug fix: report the action that was actually requested; the
        # previous message hard-coded "setValue" for every action.
        _LOGGER.warning("Not sure how to %s: %s "
                        "(available actions: %s)", str(action),
                        str(self.ha_id), str(self.fibaro_device.actions))

    def set_level(self, level):
        """Set the level of Fibaro device."""
        self.action("setValue", level)
        # Optimistically mirror the new value locally so the HA state
        # is correct before the next push update arrives.
        if 'value' in self.fibaro_device.properties:
            self.fibaro_device.properties.value = level
        if 'brightness' in self.fibaro_device.properties:
            self.fibaro_device.properties.brightness = level

    def set_level2(self, level):
        """Set the level2 (tilt) of Fibaro device."""
        self.action("setValue2", level)
        if 'value2' in self.fibaro_device.properties:
            self.fibaro_device.properties.value2 = level

    def call_turn_on(self):
        """Turn on the Fibaro device."""
        self.action("turnOn")

    def call_turn_off(self):
        """Turn off the Fibaro device."""
        self.action("turnOff")

    def call_set_color(self, red, green, blue, white):
        """Set the color of Fibaro device."""
        # Clamp every channel into the valid 0-255 range.
        red = int(max(0, min(255, red)))
        green = int(max(0, min(255, green)))
        blue = int(max(0, min(255, blue)))
        white = int(max(0, min(255, white)))
        color_str = "{},{},{},{}".format(red, green, blue, white)
        self.fibaro_device.properties.color = color_str
        self.action("setColor", str(red), str(green),
                    str(blue), str(white))

    def action(self, cmd, *args):
        """Perform an action on the Fibaro HC."""
        if cmd in self.fibaro_device.actions:
            getattr(self.fibaro_device, cmd)(*args)
            _LOGGER.debug("-> %s.%s%s called", str(self.ha_id),
                          str(cmd), str(args))
        else:
            self.dont_know_message(cmd)

    @property
    def hidden(self) -> bool:
        """Return True if the entity should be hidden from UIs."""
        return self.fibaro_device.visible is False

    @property
    def current_power_w(self):
        """Return the current power usage in W, or None if unknown."""
        if 'power' in self.fibaro_device.properties:
            power = self.fibaro_device.properties.power
            if power:
                return convert(power, float, 0.0)
        # Explicit None for both "no power property" and falsy readings
        # (previously the first case fell through implicitly).
        return None

    @property
    def current_binary_state(self):
        """Return the current binary state."""
        if self.fibaro_device.properties.value == 'false':
            return False
        # Anything 'true' or a positive number counts as on.
        if self.fibaro_device.properties.value == 'true' or \
                int(self.fibaro_device.properties.value) > 0:
            return True
        return False

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self.fibaro_device.unique_id_str

    @property
    def name(self) -> Optional[str]:
        """Return the name of the device."""
        return self._name

    @property
    def should_poll(self):
        """Get polling requirement from fibaro device."""
        # State is pushed by the Home Center; no polling needed.
        return False

    def update(self):
        """Call to update state."""
        pass

    @property
    def device_state_attributes(self):
        """Return the state attributes of the device."""
        attr = {}
        try:
            if 'battery' in self.fibaro_device.interfaces:
                attr[ATTR_BATTERY_LEVEL] = \
                    int(self.fibaro_device.properties.batteryLevel)
            if 'fibaroAlarmArm' in self.fibaro_device.interfaces:
                attr[ATTR_ARMED] = bool(self.fibaro_device.properties.armed)
            if 'power' in self.fibaro_device.interfaces:
                attr[ATTR_CURRENT_POWER_W] = convert(
                    self.fibaro_device.properties.power, float, 0.0)
            if 'energy' in self.fibaro_device.interfaces:
                attr[ATTR_CURRENT_ENERGY_KWH] = convert(
                    self.fibaro_device.properties.energy, float, 0.0)
        except (ValueError, KeyError):
            # Missing or garbled properties: report what we have so far.
            pass
        attr['fibaro_id'] = self.fibaro_device.id
        return attr
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Ponzoni, Nelson"
__copyright__ = "Copyright 2015"
__credits__ = ["Ponzoni Nelson"]
__maintainer__ = "Ponzoni Nelson"
__contact__ = "npcuadra@gmail.com"
__email__ = "npcuadra@gmail.com"
__license__ = "GPL"
__version__ = "1.0.0"
__status__ = "Production"
"""
funciones de activacion, para las nueronas/unidades
"""
#import theano
#import numpy
from theano.tensor import nnet as Tnet
from theano.tensor import erf as Terf
from theano.tensor import sqrt as Tsqrt
from theano.tensor import exp as Texp
from theano.tensor import pow as Tpow
from theano.tensor import tanh as Ttanh
from theano.tensor import dvector as Tdvector
from theano.tensor import cast as Tcast
from theano import function as Tfunction
from theano import config as Tconfig
import numpy.random as npRandom
from numpy import arange as npArange
from numpy import linspace as npLinspace
from numpy import tanh as npTanh
from numpy import exp as npExp
from abc import ABC, abstractmethod
import matplotlib.pylab as plt
from cupydle.dnn.graficos import dibujarFnActivacionTheano
from cupydle.dnn.graficos import dibujarFnActivacionNumpy
theanoFloat = Tconfig.floatX
class abstractstatic(staticmethod):
    """A ``staticmethod`` that is also an abstract method.

    Wrapping a function marks both the descriptor and the wrapped
    function with ``__isabstractmethod__``, so ``abc.ABCMeta`` refuses
    to instantiate subclasses that do not override it.
    """

    __slots__ = ()
    __isabstractmethod__ = True

    def __init__(self, function):
        super().__init__(function)
        # Flag the wrapped callable itself as abstract as well.
        function.__isabstractmethod__ = True
#### --- funcion abstracta 'esqueleto' que todos deben implementar
class Funcion(ABC):
    """Abstract interface every activation function must implement.

    Concrete subclasses provide ``__call__`` (apply the function),
    ``dibujar`` (plot it) and ``__str__`` (human readable name).
    """

    def __init__(self):
        pass

    @abstractmethod
    def __call__(self, x):
        # Identity by default; subclasses compute the activation.
        return x

    @abstractmethod
    def dibujar(self):
        raise NotImplementedError()

    @abstractmethod
    def __str__(self):
        raise NotImplementedError()
##### -------------- THEANO
class identidadTheano(Funcion):
    """Identity activation for Theano graphs: f(x) = x."""

    def __call__(self, x):
        return x

    def __str__(self):
        return "Identidad Theano"

    def dibujar(self):
        """Plot the activation over [-10, 10]."""
        dibujarFnActivacionTheano(self=self, axe=None, axis=[-10.0, 10.0],
                                  axline=[0.0, 0.0], mostrar=True)
class sigmoideaTheano(Funcion):
    """Logistic sigmoid activation backed by ``theano.tensor.nnet``."""

    def __call__(self, x):
        return Tnet.sigmoid(x)

    def __str__(self):
        return "Sigmoidea Theano"

    def dibujar(self):
        """Plot the sigmoid over [-10, 10]; the axline marks f(0)=0.5."""
        dibujarFnActivacionTheano(self=self, axe=None, axis=[-10.0, 10.0],
                                  axline=[0.5, 0.0], mostrar=True)
        return 1
class linealRectificadaTheano(Funcion):
    """Rectified linear unit (ReLU) for Theano graphs: max(0, x)."""

    def __call__(self, x):
        return Tnet.relu(x)

    def __str__(self):
        return "Lineal Rectificada Theano"

    def dibujar(self):
        """Plot the ReLU over [-10, 10]."""
        dibujarFnActivacionTheano(self=self, axe=None, axis=[-10.0, 10.0],
                                  axline=[0.0, 0.0], mostrar=True)
        return 1
class tanhTheano(Funcion):
    """Hyperbolic tangent activation for Theano graphs."""

    def __call__(self, x):
        return Ttanh(x)

    def __str__(self):
        return "Hiperbolica Theano"

    def dibujar(self):
        """Plot tanh over [-10, 10]."""
        dibujarFnActivacionTheano(self=self, axe=None, axis=[-10.0, 10.0],
                                  axline=[0.0, 0.0], mostrar=True)
        return 1
class softmaxTheano(Funcion):
    """Row-wise softmax, stabilized by subtracting the row maximum.

    The row max is removed before exponentiation to avoid the overflow
    (NaN) issues the author observed with Theano's builtin softmax.
    """

    def __call__(self, x):
        shifted = Texp(x - x.max(axis=1, keepdims=True))
        return shifted / shifted.sum(axis=1, keepdims=True)

    def __str__(self):
        return "Softmax Theano"

    def dibujar(self):
        """Plotting a multivariate map is not supported."""
        raise NotImplementedError()
class gaussianaTheano(Funcion):
    """Gaussian bump activation.

    f(x) = factor * exp(-(x - media)^2 / (2 * desviacionEstandar^2))

    With factor = 1/sqrt(2*pi*std^2) this would be the density of a
    normal distribution with mean ``media`` and std ``desviacionEstandar``.
    """

    def __init__(self, media, desviacionEstandar, factor=1.0):
        self.media = media
        self.desviacionEstandar = desviacionEstandar
        self.factor = factor

    def __call__(self, x):
        numerador = Tpow((x - self.media), 2)
        denominador = 2 * Tpow(self.desviacionEstandar, 2)
        return self.factor * Texp(-(numerador / denominador))

    def __str__(self):
        return "Gaussiana Theano"

    def dibujar(self):
        """Plot the Gaussian over [-10, 10]."""
        dibujarFnActivacionTheano(self=self, axe=None, axis=[-10.0, 10.0],
                                  axline=[0.0, 0.0], mostrar=True)
        return 1
### --------- Numpy
class identidadNumpy(Funcion):
    """Identity activation on numpy arrays: f(x) = x."""

    def __call__(self, x):
        return x

    def __str__(self):
        return "Identidad Numpy"

    def dibujar(self):
        """Plot the activation over [-10, 10]."""
        dibujarFnActivacionNumpy(self=self, axe=None, axis=[-10.0, 10.0],
                                 axline=[0.0, 0.0], mostrar=True)
        return 1
class sigmoideaNumpy(Funcion):
    """Logistic sigmoid on numpy arrays: 1 / (1 + e^-x)."""

    def __call__(self, x):
        return 1.0 / (npExp(-x) + 1.0)

    def __str__(self):
        return "Sigmoidea Numpy"

    def dibujar(self):
        """Plot the sigmoid over [-10, 10]; the axline marks f(0)=0.5."""
        dibujarFnActivacionNumpy(self=self, axe=None, axis=[-10.0, 10.0],
                                 axline=[0.5, 0.0], mostrar=True)
        return 1
class linealRectificadaNumpy(Funcion):
    """ReLU on numpy arrays: zero out the negative entries."""

    def __call__(self, x):
        # Boolean mask multiply preserves the input dtype.
        positivos = x > 0.0
        return x * positivos

    def __str__(self):
        return "Lineal Rectificada Numpy"

    def dibujar(self):
        """Plot the ReLU over [-10, 10]."""
        dibujarFnActivacionNumpy(self=self, axe=None, axis=[-10.0, 10.0],
                                 axline=[0.0, 0.0], mostrar=True)
        return 1
class tanhNumpy(Funcion):
    """Hyperbolic tangent on numpy arrays."""

    def __call__(self, x):
        return npTanh(x)

    def __str__(self):
        return "Hiperbolica Numpy"

    def dibujar(self):
        """Plot tanh over [-10, 10]."""
        dibujarFnActivacionNumpy(self=self, axe=None, axis=[-10.0, 10.0],
                                 axline=[0.0, 0.0], mostrar=True)
        return 1
class tanhDerivadaNumpy(Funcion):
    """Derivative of tanh on numpy arrays: 1 - tanh(x)^2."""

    def __call__(self, x):
        t = npTanh(x)
        return 1.0 - t ** 2

    def __str__(self):
        return "Derivada Hiperbolica Numpy"

    def dibujar(self):
        """Plot the derivative over [-10, 10]."""
        dibujarFnActivacionNumpy(self=self, axe=None, axis=[-10.0, 10.0],
                                 axline=[0.0, 0.0], mostrar=True)
        return 1
class sigmoideaDerivadaNumpy(Funcion):
    """Derivative of the logistic sigmoid: s(x) * (1 - s(x))."""

    def __call__(self, x):
        # Evaluate the sigmoid once and reuse the result.
        s = sigmoideaNumpy()(x)
        return s * (1.0 - s)

    def __str__(self):
        return "Derivada Sigmoidea Numpy"

    def dibujar(self):
        """Plot the derivative over [-10, 10]."""
        dibujarFnActivacionNumpy(self=self, axe=None, axis=[-10.0, 10.0],
                                 axline=[0.0, 0.0], mostrar=True)
        return 1
# dictionaries to make lookups by name easier...
#activation_functions = {'Tanh': tanh, 'Sigmoid': sigmoid}
#activation_functions_prime = {'Tanh': tanh_prime, 'Sigmoid': sigmoid_prime}
if __name__ == '__main__':
    # Not meant to be run as a script.  Raise instead of `assert False`:
    # asserts are stripped when Python runs with -O, which would have
    # silently allowed direct execution.
    raise SystemExit("Este modulo no es ejecutable!!!")
| |
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Modified by Haozhi Qi, from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# --------------------------------------------------------
"""
given a pascal voc imdb, compute mAP
"""
import numpy as np
import os
import cPickle
from mask.mask_transform import mask_overlap
def parse_voc_rec(filename):
    """
    parse pascal voc record into a dictionary
    :param filename: xml file path
    :return: list of dict
    """
    import xml.etree.ElementTree as ET
    annotation = ET.parse(filename)
    records = []
    for obj in annotation.findall('object'):
        bndbox = obj.find('bndbox')
        # Coordinates may be written as floats; truncate to int.
        coords = [int(float(bndbox.find(tag).text))
                  for tag in ('xmin', 'ymin', 'xmax', 'ymax')]
        records.append({
            'name': obj.find('name').text,
            'difficult': int(obj.find('difficult').text),
            'bbox': coords,
        })
    return records
def voc_ap(rec, prec, use_07_metric=False):
    """
    average precision calculations
    [precision integrated to recall]
    :param rec: recall
    :param prec: precision
    :param use_07_metric: 2007 metric is 11-recall-point based AP
    :return: average precision
    """
    if use_07_metric:
        # 11-point interpolation: max precision at recall >= t.
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            mask = rec >= t
            p = np.max(prec[mask]) if np.any(mask) else 0
            ap += p / 11.
        return ap
    # Exact area under the monotone-interpolated PR curve.
    mrec = np.concatenate(([0.], rec, [1.]))
    mpre = np.concatenate(([0.], prec, [0.]))
    # Make precision monotonically non-increasing from right to left.
    for i in range(mpre.size - 1, 0, -1):
        mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
    # Indices where recall changes; sum (delta recall) * precision.
    change = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
def voc_eval(detpath, annopath, imageset_file, classname, annocache, ovthresh=0.5, use_07_metric=False):
    """
    pascal voc evaluation
    :param detpath: detection results detpath.format(classname)
    :param annopath: annotations annopath.format(classname)
    :param imageset_file: text file containing list of images
    :param classname: category name
    :param annocache: caching annotations
    :param ovthresh: overlap threshold
    :param use_07_metric: whether to use voc07's 11 point ap computation
    :return: rec, prec, ap
    """
    with open(imageset_file, 'r') as f:
        lines = f.readlines()
    image_filenames = [x.strip() for x in lines]
    # load annotations from cache (parse the XMLs once, pickle the result)
    if not os.path.isfile(annocache):
        recs = {}
        for ind, image_filename in enumerate(image_filenames):
            recs[image_filename] = parse_voc_rec(annopath.format(image_filename))
            if ind % 100 == 0:
                print 'reading annotations for {:d}/{:d}'.format(ind + 1, len(image_filenames))
        print 'saving annotations cache to {:s}'.format(annocache)
        with open(annocache, 'wb') as f:
            cPickle.dump(recs, f, protocol=cPickle.HIGHEST_PROTOCOL)
    else:
        with open(annocache, 'rb') as f:
            recs = cPickle.load(f)
    # extract objects in :param classname:
    # npos counts only non-difficult ground truths (the AP denominator).
    class_recs = {}
    npos = 0
    for image_filename in image_filenames:
        objects = [obj for obj in recs[image_filename] if obj['name'] == classname]
        bbox = np.array([x['bbox'] for x in objects])
        difficult = np.array([x['difficult'] for x in objects]).astype(np.bool)
        det = [False] * len(objects)  # stand for detected
        npos = npos + sum(~difficult)
        class_recs[image_filename] = {'bbox': bbox,
                                      'difficult': difficult,
                                      'det': det}
    # read detections: each line is "image_id confidence x1 y1 x2 y2"
    detfile = detpath.format(classname)
    with open(detfile, 'r') as f:
        lines = f.readlines()
    splitlines = [x.strip().split(' ') for x in lines]
    image_ids = [x[0] for x in splitlines]
    confidence = np.array([float(x[1]) for x in splitlines])
    bbox = np.array([[float(z) for z in x[2:]] for x in splitlines])
    # sort by confidence (descending) so greedy matching sees best first
    if bbox.shape[0] > 0:
        sorted_inds = np.argsort(-confidence)
        # NOTE(review): sorted_scores is never used afterwards.
        sorted_scores = np.sort(-confidence)
        bbox = bbox[sorted_inds, :]
        image_ids = [image_ids[x] for x in sorted_inds]
    # go down detections and mark true positives and false positives
    nd = len(image_ids)
    tp = np.zeros(nd)
    fp = np.zeros(nd)
    for d in range(nd):
        r = class_recs[image_ids[d]]
        bb = bbox[d, :].astype(float)
        ovmax = -np.inf
        bbgt = r['bbox'].astype(float)
        if bbgt.size > 0:
            # compute overlaps
            # intersection (+1 because VOC boxes are inclusive pixel coords)
            ixmin = np.maximum(bbgt[:, 0], bb[0])
            iymin = np.maximum(bbgt[:, 1], bb[1])
            ixmax = np.minimum(bbgt[:, 2], bb[2])
            iymax = np.minimum(bbgt[:, 3], bb[3])
            iw = np.maximum(ixmax - ixmin + 1., 0.)
            ih = np.maximum(iymax - iymin + 1., 0.)
            inters = iw * ih
            # union
            uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
                   (bbgt[:, 2] - bbgt[:, 0] + 1.) *
                   (bbgt[:, 3] - bbgt[:, 1] + 1.) - inters)
            overlaps = inters / uni
            ovmax = np.max(overlaps)
            jmax = np.argmax(overlaps)
        if ovmax > ovthresh:
            # difficult ground truths are ignored (neither tp nor fp);
            # a second match to an already-claimed gt is a false positive
            if not r['difficult'][jmax]:
                if not r['det'][jmax]:
                    tp[d] = 1.
                    r['det'][jmax] = 1
                else:
                    fp[d] = 1.
        else:
            fp[d] = 1.
    # compute precision recall (cumulative over ranked detections)
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(npos)
    # avoid division by zero in case first detection matches a difficult ground ruth
    prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, use_07_metric)
    return rec, prec, ap
def voc_eval_sds(det_file, seg_file, devkit_path, image_list, cls_name, cache_dir,
                 class_names, mask_size, binary_thresh, ov_thresh=0.5):
    """
    Instance-segmentation AP (SDS protocol) for one class.
    :param det_file: pickle of per-image predicted boxes (with scores)
    :param seg_file: pickle of per-image predicted masks
    :param devkit_path: root directory of VOCdevkitSDS
    :param image_list: text file listing image names
    :param cls_name: class being evaluated
    :param cache_dir: directory for cached ground-truth masks
    :param class_names: all VOC class names (for cache creation)
    :param mask_size: side length of the square predicted masks
    :param binary_thresh: threshold for binarizing resized masks
    :param ov_thresh: mask-overlap threshold for a true positive
    :return: average precision (VOC07 11-point metric)
    """
    # 1. Check whether ground truth cache file exists
    with open(image_list, 'r') as f:
        lines = f.readlines()
    image_names = [x.strip() for x in lines]
    check_voc_sds_cache(cache_dir, devkit_path, image_names, class_names)
    gt_cache = cache_dir + '/' + cls_name + '_mask_gt.pkl'
    with open(gt_cache, 'rb') as f:
        gt_pkl = cPickle.load(f)
    # 2. Get predict pickle file for this class
    with open(det_file, 'rb') as f:
        boxes_pkl = cPickle.load(f)
    with open(seg_file, 'rb') as f:
        masks_pkl = cPickle.load(f)
    # 3. Pre-compute number of total instances to allocate memory
    num_image = len(image_names)
    box_num = 0
    for im_i in xrange(num_image):
        box_num += len(boxes_pkl[im_i])
    # 4. Re-organize all the predicted boxes (flatten per-image lists)
    new_boxes = np.zeros((box_num, 5))
    new_masks = np.zeros((box_num, mask_size, mask_size))
    new_image = []
    cnt = 0
    for image_ind in xrange(len(image_names)):
        boxes = boxes_pkl[image_ind]
        masks = masks_pkl[image_ind]
        num_instance = len(boxes)
        for box_ind in xrange(num_instance):
            new_boxes[cnt] = boxes[box_ind]
            new_masks[cnt] = masks[box_ind]
            new_image.append(image_names[image_ind])
            cnt += 1
    # 5. Rearrange boxes according to their scores (descending)
    seg_scores = new_boxes[:, -1]
    keep_inds = np.argsort(-seg_scores)
    new_boxes = new_boxes[keep_inds, :]
    new_masks = new_masks[keep_inds, :, :]
    num_pred = new_boxes.shape[0]
    import cv2
    # 6. Calculate t/f positive
    fp = np.zeros((num_pred, 1))
    tp = np.zeros((num_pred, 1))
    for i in xrange(num_pred):
        pred_box = np.round(new_boxes[i, :4]).astype(int)
        pred_mask = new_masks[i]
        # Resize the fixed-size mask to the predicted box and binarize.
        pred_mask = cv2.resize(pred_mask.astype(np.float32), (pred_box[2] - pred_box[0] + 1, pred_box[3] - pred_box[1] + 1))
        pred_mask = pred_mask >= binary_thresh
        # new_image was not reordered, so index through keep_inds.
        image_index = new_image[keep_inds[i]]
        if image_index not in gt_pkl:
            fp[i] = 1
            continue
        gt_dict_list = gt_pkl[image_index]
        # calculate max region overlap against every gt instance
        cur_overlap = -1000
        cur_overlap_ind = -1
        for ind2, gt_dict in enumerate(gt_dict_list):
            gt_mask_bound = np.round(gt_dict['mask_bound']).astype(int)
            pred_mask_bound = pred_box
            ov = mask_overlap(gt_mask_bound, pred_mask_bound, gt_dict['mask'], pred_mask)
            if ov > cur_overlap:
                cur_overlap = ov
                cur_overlap_ind = ind2
        # Greedy matching: each gt instance may be claimed only once.
        if cur_overlap >= ov_thresh:
            if gt_dict_list[cur_overlap_ind]['already_detect']:
                fp[i] = 1
            else:
                tp[i] = 1
                gt_dict_list[cur_overlap_ind]['already_detect'] = 1
        else:
            fp[i] = 1
    # 7. Calculate precision
    num_pos = 0
    for key, val in gt_pkl.iteritems():
        num_pos += len(val)
    fp = np.cumsum(fp)
    tp = np.cumsum(tp)
    rec = tp / float(num_pos)
    # avoid divide by zero in case the first matches a difficult gt
    prec = tp / np.maximum(fp+tp, np.finfo(np.float64).eps)
    ap = voc_ap(rec, prec, True)
    return ap
def parse_inst(image_name, devkit_path):
    """
    Get corresponding masks, boxes, classes according to image name
    Args:
        image_name: input image name
        devkit_path: root dir for devkit SDS
    Returns:
        roi/mask dictionary of this image
    """
    import PIL
    # Instance map: every object instance has a distinct pixel value.
    seg_obj_name = os.path.join(devkit_path, 'SegmentationObject', image_name + '.png')
    seg_obj_data = PIL.Image.open(seg_obj_name)
    seg_obj_data = np.array(seg_obj_data.getdata(), np.uint8).reshape(seg_obj_data.size[1], seg_obj_data.size[0])
    # Class map: pixel value encodes the VOC class index.
    seg_cls_name = os.path.join(devkit_path, 'SegmentationClass', image_name + '.png')
    seg_cls_data = PIL.Image.open(seg_cls_name)
    seg_cls_data = np.array(seg_cls_data.getdata(), np.uint8).reshape(seg_cls_data.size[1], seg_cls_data.size[0])
    unique_inst = np.unique(seg_obj_data)
    # delete background pixels
    background_ind = np.where(unique_inst == 0)[0]
    unique_inst = np.delete(unique_inst, background_ind)
    record = []
    for inst_ind in xrange(unique_inst.shape[0]):
        # Tight bounding box [xmin, ymin, xmax, ymax] of the instance.
        [r, c] = np.where(seg_obj_data == unique_inst[inst_ind])
        mask_bound = np.zeros(4, dtype=int)
        mask_bound[0] = np.min(c)
        mask_bound[1] = np.min(r)
        mask_bound[2] = np.max(c)
        mask_bound[3] = np.max(r)
        # Boolean mask cropped to the bounding box.
        mask = seg_obj_data[mask_bound[1]:mask_bound[3]+1, mask_bound[0]:mask_bound[2]+1]
        mask = (mask == unique_inst[inst_ind])
        mask_cls = seg_cls_data[mask_bound[1]:mask_bound[3]+1, mask_bound[0]:mask_bound[2]+1]
        mask_cls = mask_cls[mask]
        # All pixels of one instance must belong to exactly one class.
        num_cls = np.unique(mask_cls)
        assert num_cls.shape[0] == 1
        cur_inst = num_cls[0]
        record.append({
            'mask': mask,
            'mask_cls': cur_inst,
            'mask_bound': mask_bound
        })
    return record
def check_voc_sds_cache(cache_dir, devkit_path, image_names, class_names):
    """
    Build per-class ground-truth mask caches if any are missing.
    Args:
        cache_dir: output directory for cached mask annotation
        devkit_path: root directory of VOCdevkitSDS
        image_names: used for parse image instances
        class_names: VOC 20 class names
    """
    if not os.path.isdir(cache_dir):
        os.mkdir(cache_dir)
    # Rebuild everything if any per-class cache file is missing.
    exist_cache = True
    for cls_name in class_names:
        if cls_name == '__background__':
            continue
        cache_name = os.path.join(cache_dir, cls_name + '_mask_gt.pkl')
        if not os.path.isfile(cache_name):
            exist_cache = False
            break
    if not exist_cache:
        # load annotations:
        # create a list with size classes
        record_list = [{} for _ in xrange(21)]
        for i, image_name in enumerate(image_names):
            record = parse_inst(image_name, devkit_path)
            for j, mask_dic in enumerate(record):
                # Group instances by class index, then by image name.
                cls = mask_dic['mask_cls']
                mask_dic['already_detect'] = False
                if image_name not in record_list[cls]:
                    record_list[cls][image_name] = []
                record_list[cls][image_name].append(mask_dic)
            if i % 100 == 0:
                print 'Reading annotation for {:d}/{:d}'.format(i + 1, len(image_names))
        print 'Saving cached annotations...'
        for cls_ind, name in enumerate(class_names):
            if name == '__background__':
                continue
            cachefile = os.path.join(cache_dir, name + '_mask_gt.pkl')
            with open(cachefile, 'wb') as f:
                cPickle.dump(record_list[cls_ind], f)
| |
from __future__ import absolute_import
import json
import os
from six.moves.urllib_parse import urlparse
from autobahn.twisted.resource import WebSocketResource
from autobahn.twisted.websocket import (WebSocketServerFactory,
WebSocketServerProtocol)
from weakref import WeakKeyDictionary, WeakValueDictionary
from monotonic import monotonic
from twisted.python import log
from twisted.python.failure import Failure
from scrapy.utils.serialize import ScrapyJSONEncoder
from splash import defaults
from splash.browser_tab import BrowserTab
from splash.network_manager import SplashQNetworkAccessManager
from splash.render_options import RenderOptions
from splash import defaults
from PyQt4.QtCore import QObject
from PyQt4.QtCore import pyqtSlot
from PyQt4.QtWebKit import QWebElement
from slybot.spider import IblSpider
from slyd.errors import BaseHTTPError
from .cookies import PortiaCookieJar
from .commands import (load_page, interact_page, close_tab, metadata, resize,
resolve, update_project_data, rename_project_data,
delete_project_data, pause, resume, log_event)
from .css_utils import process_css, wrap_url
import six
text = six.text_type # unicode in py2, str in py3
import txaio
txaio.use_twisted()
# Defaults for the embedded browser tabs: user agent and window size.
_DEFAULT_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'
_DEFAULT_VIEWPORT = '1240x680'
def create_ferry_resource(spec_manager, factory):
    """Build the ferry websocket resource for *spec_manager*/*factory*."""
    resource = FerryWebSocketResource(spec_manager, factory)
    return resource
class PortiaNetworkManager(SplashQNetworkAccessManager):
    """Network manager that records the raw HTML of the main frame.

    When a request matches the main frame's requested URL, the reply's
    bytes are accumulated into ``_raw_html`` as they arrive.
    """

    # Raw response body of the last main-frame request (None until seen).
    _raw_html = None

    def createRequest(self, operation, request, outgoingData=None):
        """Create the reply and, for main-frame requests, tap its data."""
        reply = super(PortiaNetworkManager, self).createRequest(
            operation, request, outgoingData)
        try:
            url = six.binary_type(request.url().toEncoded())
            frame_url = six.binary_type(
                self.tab.web_page.mainFrame().requestedUrl().toEncoded())
            if url == frame_url:
                self._raw_html = ''
                reply.readyRead.connect(self._ready_read)
        except Exception:
            # Best-effort bookkeeping must never break the request.
            # (Was a bare `except:` with `return reply` in a `finally`,
            # which also swallowed KeyboardInterrupt and any error
            # raised by the handler itself.)
            log.err()
        return reply

    def _ready_read(self):
        """Append the bytes available on the signalling reply."""
        reply = self.sender()
        self._raw_html = self._raw_html + six.binary_type(
            reply.peek(reply.bytesAvailable()))
class FerryWebSocketResource(WebSocketResource):
    """WebSocket resource that configures the ferry protocol class."""

    def __init__(self, spec_manager, factory):
        self.spec_manager = spec_manager
        self.settings = spec_manager.settings
        # The protocol is instantiated by autobahn, so its shared
        # configuration is stored on the class itself.
        FerryServerProtocol.spec_manager = spec_manager
        FerryServerProtocol.settings = spec_manager.settings
        FerryServerProtocol.assets = factory.assets
        WebSocketResource.__init__(self, factory)

    def render(self, request):
        """Forward the request with its auth info attached as a header."""
        auth_header = [json.dumps(request.auth_info)]
        request.requestHeaders.setRawHeaders('X-Auth-Info', auth_header)
        return WebSocketResource.render(self, request)
class User(object):
    """Per-connection session state, addressable by tab id.

    Instances register themselves in a weak-valued registry so they can
    be found by ``tabid`` without being kept alive by the registry.
    """

    _by_id = WeakValueDictionary()

    def __init__(self, auth, tab=None, spider=None, spiderspec=None):
        self.auth = auth
        self.authorized_projects = auth.get('authorized_projects', None)
        self.tab = tab
        self.spider = spider
        self.spiderspec = spiderspec
        # The object's identity doubles as its tab id.
        self.tabid = id(self)
        User._by_id[self.tabid] = self

    @classmethod
    def findById(cls, tabid):
        """Return the live user for *tabid*, or None."""
        return cls._by_id.get(tabid, None)

    @property
    def name(self):
        """Username from the auth info, or '' when anonymous."""
        return self.auth.get('username', '')

    def __getattr__(self, key):
        # Unknown attributes fall back to the auth mapping.
        try:
            return self.auth[key]
        except KeyError:
            raise AttributeError(
                '"%s" has no attribute "%s"' % (type(self).__name__, key))
class SpiderSpec(object):
    """Bundle of a spider definition with its items and extractors."""

    def __init__(self, name, spider, items, extractors):
        self.name = name
        self.spider = spider
        self.items = items
        self.extractors = extractors

    @property
    def templates(self):
        """Annotated sample pages stored on the spider definition."""
        return self.spider['templates']
class PortiaJSApi(QObject):
    """Qt bridge object exposed to in-page JavaScript.

    Slots are callable from the page; they forward CSS/URL rewriting
    and messages back to the owning websocket protocol.
    """
    def __init__(self, protocol):
        super(PortiaJSApi, self).__init__()
        self.protocol = protocol
    @pyqtSlot(QWebElement)
    def returnElement(self, element):
        """Hack to return a DOM node as a QWebElement instead of
        QVariant(QVariantMap) """
        self.element = element
    def getReturnedElement(self):
        """Pop and return the element stashed by returnElement."""
        element = self.element
        self.element = None
        return element
    @pyqtSlot('QString', 'QString', result='QString')
    def processCss(self, css, baseuri):
        """Rewrite CSS so its URLs resolve through this tab."""
        return process_css(text(css), self.protocol.user.tabid, text(baseuri))
    @pyqtSlot('QString', 'QString', result='QString')
    def wrapUrl(self, url, baseuri):
        """Wrap a URL so it is proxied through this tab."""
        return wrap_url(text(url), self.protocol.user.tabid, text(baseuri))
    @pyqtSlot('QString')
    def log(self, s):
        print(s)
    @pyqtSlot('QString')
    def sendMessage(self, message):
        """Decode a [command, data] JSON pair from the page and relay it."""
        message = text(message)
        message = message.strip('\x00')  # Allocation bug somewhere leaves null characters at the end.
        try:
            command, data = json.loads(message)
        except ValueError as e:
            # NOTE(review): e.message is Python 2 only; under Python 3
            # this would raise AttributeError.
            return log.err(ValueError(
                "%s JSON head: %r tail: %r" % (e.message, message[:100], message[-100:])
            ))
        self.protocol.sendMessage({
            '_command': command,
            '_data': data
        })
        # Mutations invalidate cached page metadata; push fresh metadata.
        if command == 'mutation':
            self.protocol.sendMessage(metadata(self.protocol))
class FerryServerProtocol(WebSocketServerProtocol):
    """WebSocket protocol behind the Portia UI.

    Each incoming JSON message carries a ``_command`` key that is dispatched
    through ``_handlers``; results (or errors) are sent back as JSON.
    Per-connection state (the ``User`` with its tab/spider) is stored on the
    factory, keyed by this protocol instance.
    """

    # Dispatch table: '_command' value -> handler(data, protocol).
    _handlers = {
        'load': load_page,
        'interact': interact_page,
        'close_tab': close_tab,
        'heartbeat': lambda d, s: None,
        'resize': resize,
        'saveChanges': update_project_data,
        'delete': delete_project_data,
        'rename': rename_project_data,
        'resolve': resolve,
        'resume': resume,
        'log_event': log_event,
        'pause': pause
    }
    # Process-wide configuration; populated once as class attributes by the
    # WebSocket resource's __init__ and shared by all connections.
    spec_manager = None
    settings = None
    assets = './'
    storage = None

    @property
    def tab(self):
        # Browser tab for this connection (None until open_tab runs).
        return self.factory[self].tab

    @property
    def spider(self):
        # IblSpider instance for this connection, if a spider is open.
        return self.factory[self].spider

    @property
    def spiderspec(self):
        # SpiderSpec (name/spider/items/extractors) for this connection.
        return self.factory[self].spiderspec

    @property
    def user(self):
        # The User object registered for this connection on the factory.
        return self.factory[self]

    def onConnect(self, request):
        """Handshake: read auth info and register a ``User`` on the factory.

        The 'x-auth-info' header is written by the WebSocket resource's
        render() method.  When it is missing or malformed no User is
        registered and onOpen will close the connection.
        """
        try:
            request.auth_info = json.loads(request.headers['x-auth-info'])
        except (KeyError, TypeError):
            return
        self.start_time = monotonic()
        self.spent_time = 0
        self.session_id = ''
        self.factory[self] = User(request.auth_info)

    def onOpen(self):
        # Reject connections that did not authenticate during onConnect.
        if self not in self.factory:
            self.sendClose(1000, 'Invalid Connection missing required '
                           'parameters')

    def onMessage(self, payload, isbinary):
        """Decode a JSON message, dispatch its '_command', and reply.

        Handler exceptions are logged and reported to the client as an
        ``{'error': code, ...}`` message instead of killing the connection.
        """
        payload = payload.decode('utf-8')
        data = json.loads(payload)
        if '_meta' in data and 'session_id' in data['_meta']:
            self.session_id = data['_meta']['session_id']
        if '_command' in data and data['_command'] in self._handlers:
            command = data['_command']
            try:
                result = self._handlers[command](data, self)
            except Exception as e:
                # Reply on the callback channel when one was requested.
                command = data.get('_callback') or command
                if isinstance(e, BaseHTTPError):
                    code = e.status
                    reason = e.title
                else:
                    code = 500
                    reason = "Internal Server Error"
                failure = Failure(e)
                log.err(failure)
                # Surface the sentry event id when a reporter attached one.
                event_id = getattr(failure, 'sentry_event_id', None)
                if event_id:
                    reason = "%s (Event ID: %s)" % (reason, event_id)
                return self.sendMessage({
                    'error': code,
                    '_command': command,
                    'id': data.get('_meta', {}).get('id'),
                    'reason': reason
                })
            if result:
                result.setdefault('_command', data.get('_callback', command))
                self.sendMessage(result)
        else:
            command = data.get('_command')
            if command:
                message = 'No command named "%s" found.' % command
            else:
                message = "No command received"
            self.sendMessage({'error': 4000,
                              'reason': message})

    def onClose(self, was_clean, code, reason):
        """Close the tab, pause the session, and log the closure."""
        if self in self.factory:
            if self.tab is not None:
                self.tab.close()
                self._handlers['pause']({}, self)
            msg_data = {'session': self.session_id,
                        'session_time': self.spent_time,
                        'user': self.user.name}
            msg = (u'Websocket Closed: id=%(session)s t=%(session_time)s '
                   u'user=%(user)s command=' % (msg_data))
            log.err(msg)

    def sendMessage(self, payload, is_binary=False):
        """Serialize *payload* with ScrapyJSONEncoder and send it."""
        super(FerryServerProtocol, self).sendMessage(
            json.dumps(payload, cls=ScrapyJSONEncoder, sort_keys=True),
            is_binary
        )

    def getElementByNodeId(self, nodeid):
        """Fetch a live QWebElement for *nodeid* via the in-page JS API.

        The JS call hands the element back through
        PortiaJSApi.returnElement (see ``self.js_api``).
        """
        self.tab.web_page.mainFrame().evaluateJavaScript(
            'livePortiaPage.pyGetByNodeId(%s)' % nodeid
        )
        return self.js_api.getReturnedElement()

    def open_tab(self, meta=None):
        """Create the browser tab for this connection and wire it up.

        *meta* may carry 'cookies', 'storage', 'viewport' and 'user_agent'
        entries used to seed the new tab.
        """
        if meta is None:
            meta = {}
        manager = PortiaNetworkManager(
            filters_path=None,
            allowed_schemes=defaults.ALLOWED_SCHEMES,
            verbosity=defaults.VERBOSITY
        )
        manager.setCache(None)
        data = {}
        data['uid'] = id(data)
        self.factory[self].tab = BrowserTab(
            network_manager=manager,
            splash_proxy_factory=None,
            verbosity=0,
            render_options=RenderOptions(data, defaults.MAX_TIMEOUT),
            visible=True,
        )
        manager.tab = self.tab
        main_frame = self.tab.web_page.mainFrame()
        cookiejar = PortiaCookieJar(self.tab.web_page, self)
        self.tab.web_page.cookiejar = cookiejar
        if meta.get('cookies'):
            cookiejar.put_client_cookies(meta['cookies'])
        if meta.get('storage'):
            # Client-supplied web storage; restored per-origin later in
            # populate_window_object.
            self.storage = meta['storage']
        main_frame.loadStarted.connect(self._on_load_started)
        self.js_api = PortiaJSApi(self)
        main_frame.javaScriptWindowObjectCleared.connect(
            self.populate_window_object
        )
        self.tab.set_images_enabled(False)
        self.tab.set_viewport(meta.get('viewport', _DEFAULT_VIEWPORT))
        self.tab.set_user_agent(meta.get('user_agent', _DEFAULT_USER_AGENT))
        self.tab.loaded = False

    def _on_load_started(self):
        # Notify the client that a page load has begun.
        self.sendMessage({'_command': 'loadStarted'})

    def populate_window_object(self):
        """Inject the ``__portiaApi`` bridge and content scripts into the
        freshly cleared page window, then restore any saved web storage."""
        main_frame = self.tab.web_page.mainFrame()
        main_frame.addToJavaScriptWindowObject('__portiaApi', self.js_api)
        self.tab.run_js_files(
            os.path.join(self.assets, 'splash_content_scripts'),
            handle_errors=False)
        origin = self.tab.evaljs('location.origin')
        storage = self.storage or {}
        local_storage = storage.get('local', {}).get(origin, {})
        session_storage = storage.get('session', {}).get(origin, {})
        if local_storage or session_storage:
            script = 'livePortiaPage.setLocalStorage(%s, %s)' % (
                json.dumps(local_storage), json.dumps(session_storage)
            )
            main_frame.evaluateJavaScript(script)

    def open_spider(self, meta):
        """Load the spider named in *meta* for this user's project.

        Returns an error dict when the project is missing from *meta* or
        the user is not authorized for it (staff users bypass the check);
        otherwise stores the IblSpider and its SpiderSpec on the
        connection's User.
        """
        if ('project' not in meta or 'spider' not in meta or
                (self.user.authorized_projects is not None and
                 meta['project'] not in self.user.authorized_projects and
                 not self.user.staff)):
            return {'error': 4004,
                    'reason': 'Project "%s" not found' % meta['project']}
        spider_name = meta['spider']
        spec = self.spec_manager.project_spec(meta['project'], self.user.auth)
        spider = spec.spider_with_templates(spider_name)
        items = spec.resource('items')
        extractors = spec.resource('extractors')
        if not self.settings.get('SPLASH_URL'):
            self.settings.set('SPLASH_URL', 'portia')
        self.factory[self].spider = IblSpider(spider_name, spider, items,
                                              extractors, self.settings)
        self.factory[self].spiderspec = SpiderSpec(spider_name, spider, items,
                                                   extractors)

    def update_spider(self, meta, spider=None, template=None, items=None,
                      extractors=None):
        """Rebuild the running spider with updated pieces.

        Falls back to open_spider when no spider (or a different one) is
        currently loaded.  A *template* replaces the existing template with
        the same original_body, or is appended as a new one.
        """
        if not hasattr(self.factory[self], 'spiderspec'):
            return self.open_spider(meta)
        spec = self.factory[self].spiderspec
        if spec is None or spec.name != meta.get('spider'):
            return self.open_spider(meta)
        items = items or spec.items
        extractors = extractors or spec.extractors
        if spider:
            # Keep the currently-known templates with the updated spider.
            spider['templates'] = spec.spider['templates']
        else:
            spider = spec.spider
        if template:
            for idx, tmpl in enumerate(spider['templates']):
                if template['original_body'] == tmpl['original_body']:
                    spider['templates'][idx] = template
                    break
            else:
                # No matching template found: treat it as a new one.
                spider['templates'].append(template)
            spider['template_names'] = [t['name'] for t in spider['templates']]
        self.factory[self].spider = IblSpider(meta['spider'], spider, items,
                                              extractors, self.settings)
        self.factory[self].spiderspec = SpiderSpec(meta['spider'], spider,
                                                   items, extractors)
class FerryServerFactory(WebSocketServerFactory):
    """WebSocket factory that tracks per-connection ``User`` state.

    Protocol instances are mapped to their ``User`` objects through a
    ``WeakKeyDictionary`` so entries disappear automatically when a
    connection is garbage collected.
    """

    def __init__(self, uri, debug=False, assets='./'):
        WebSocketServerFactory.__init__(self, uri, debug=debug)
        self._peers = WeakKeyDictionary()
        self.assets = assets

    def __getitem__(self, key):
        """Return the ``User`` for *key* (a protocol), or ``None``."""
        if key in self._peers:
            return self._peers[key]
        return None

    def __setitem__(self, key, value):
        self._peers[key] = value

    def __contains__(self, key):
        return self._peers.get(key) is not None

    def __repr__(self):
        # BUG FIX: the generator expression was previously interpolated as a
        # whole into a single 'User(%s)' string (misplaced parenthesis), so
        # repr() rendered 'User(<generator object ...>)'.  Format each
        # connected user's tab host individually instead.
        return 'Ferry(%s)' % ', '.join(
            'User(%s)' % urlparse(user.tab.url).netloc
            for user in self._peers.values()
            if user.tab)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds ``Property_fee.send_message``."""

    def forwards(self, orm):
        """Apply the migration: add the integer column with default 0."""
        # Adding field 'Property_fee.send_message'
        db.add_column(u'SmartDataApp_property_fee', 'send_message',
                      self.gf('django.db.models.fields.IntegerField')(default=0),
                      keep_default=False)

    def backwards(self, orm):
        """Revert the migration: drop the column again."""
        # Deleting field 'Property_fee.send_message'
        db.delete_column(u'SmartDataApp_property_fee', 'send_message')

    # Frozen ORM state auto-generated by South; used to build the fake ORM
    # passed to forwards()/backwards().  Do not edit by hand.
    models = {
        u'SmartDataApp.car_washing': {
            'Meta': {'object_name': 'Car_Washing'},
            'apply_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'other_car_num': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'washing_case': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'washing_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'washing_type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'SmartDataApp.chinapayhistory': {
            'Meta': {'object_name': 'ChinaPayHistory'},
            'charset': ('django.db.models.fields.CharField', [], {'default': "u'UTF-8'", 'max_length': '250', 'null': 'True'}),
            'cup_reserved': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'exchange_date': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'exchange_rate': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mer_abbr': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'mer_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'order_amount': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'order_currency': ('django.db.models.fields.CharField', [], {'default': "u'156'", 'max_length': '250', 'null': 'True'}),
            'order_number': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'qid': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True'}),
            'resp_code': ('django.db.models.fields.CharField', [], {'default': "'00'", 'max_length': '250', 'null': 'True'}),
            'resp_msg': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'resp_time': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'settle_amount': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'settle_currency': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'settle_date': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'sign_method': ('django.db.models.fields.CharField', [], {'default': "u'MD5'", 'max_length': '250', 'null': 'True'}),
            'signature': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'trace_time': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'trance_number': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'trance_type': ('django.db.models.fields.CharField', [], {'default': "u'01'", 'max_length': '250', 'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'default': "u'1.0.0'", 'max_length': '250', 'null': 'True'})
        },
        u'SmartDataApp.community': {
            'Meta': {'object_name': 'Community'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'SmartDataApp.complaints': {
            'Meta': {'object_name': 'Complaints'},
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'author_detail': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'complete_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'content': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'create_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.express': {
            'Meta': {'object_name': 'Express'},
            'allowable_get_express_time': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'arrive_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'arrive_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'complete_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'get_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'signer': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'submit_express_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'submit_get_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.fee_standard': {
            'Meta': {'object_name': 'Fee_standard'},
            'deadline': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '6', 'decimal_places': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'})
        },
        u'SmartDataApp.fees': {
            'Meta': {'object_name': 'Fees'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '19', 'decimal_places': '3'}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'SmartDataApp.housekeeping': {
            'Meta': {'object_name': 'Housekeeping'},
            'allow_deal_time': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'apply_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'housekeeping_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Housekeeping_items']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        u'SmartDataApp.housekeeping_items': {
            'Meta': {'object_name': 'Housekeeping_items'},
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'content': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'price': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
            'price_description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'remarks': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'})
        },
        u'SmartDataApp.notification': {
            'Meta': {'object_name': 'Notification'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notification_community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'notification_content': ('django.db.models.fields.CharField', [], {'max_length': '100000', 'null': 'True', 'blank': 'True'}),
            'notification_theme': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'notification_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        u'SmartDataApp.ordernumber': {
            'Meta': {'object_name': 'OrderNumber'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.CharField', [], {'default': "u'10000000'", 'max_length': '10', 'null': 'True'})
        },
        u'SmartDataApp.park_fee': {
            'Meta': {'object_name': 'Park_fee'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'car_number': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'park_type': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'renewal_fees': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'valid_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'})
        },
        u'SmartDataApp.picture': {
            'Meta': {'object_name': 'Picture'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keep': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'like': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
            'timestamp_add': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'SmartDataApp.profiledetail': {
            'Meta': {'object_name': 'ProfileDetail'},
            'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'device_chanel_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True'}),
            'device_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True'}),
            'device_user_id': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '250', 'null': 'True'}),
            'floor': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'gate_card': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
            'house_acreage': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '6', 'decimal_places': '2'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '11', 'null': 'True'}),
            'profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'SmartDataApp.property_fee': {
            'Meta': {'object_name': 'Property_fee'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'deadline': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pay_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'pay_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'send_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'valid_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'})
        },
        u'SmartDataApp.repair': {
            'Meta': {'object_name': 'Repair'},
            'author': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'author_detail': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'null': 'True'}),
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'complete_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'content': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'blank': 'True'}),
            'create_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
            'handler': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_admin_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_worker_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'pleased': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'pleased_reason': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'repair_item': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
            'src': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.repair_item': {
            'Meta': {'object_name': 'Repair_item'},
            'community': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Community']", 'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'SmartDataApp.transaction': {
            'Meta': {'object_name': 'Transaction'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'grade_num': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'money_num': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '19', 'decimal_places': '6'}),
            'order_number': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'remark': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'wallet_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['SmartDataApp.Wallet']", 'null': 'True'})
        },
        u'SmartDataApp.wallet': {
            'Meta': {'object_name': 'Wallet'},
            'grade_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'money_sum': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '19', 'decimal_places': '6'}),
            'user_profile': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['SmartDataApp.ProfileDetail']", 'unique': 'True', 'null': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['SmartDataApp']
| |
#
# Copyright (C) 2004-2022 ZNC, see the NOTICE file for details.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Optional coverage.py instrumentation: when the ZNC_MODPYTHON_COVERAGE
# environment variable is set, start collecting branch coverage for the
# modpython runtime (auto_data appends to existing coverage data files).
_cov = None
import os
if os.environ.get('ZNC_MODPYTHON_COVERAGE'):
    import coverage
    _cov = coverage.Coverage(auto_data=True, branch=True)
    _cov.start()
from functools import wraps
import collections.abc
import importlib.abc
import importlib.machinery
import importlib.util
import re
import sys
import traceback
from znc_core import *
class Socket:
    """Base class for python sockets created via ``Module.CreateSocket``.

    The wrapped C++ socket is stored on ``self._csock`` by CreateSocket.
    Subclasses override the ``On*`` hooks below, which default to no-ops.
    """

    # Maps the user-facing addrtype string to ZNC's address constants.
    ADDR_MAP = {
        'ipv4': ADDR_IPV4ONLY,
        'ipv6': ADDR_IPV6ONLY,
        'all': ADDR_ALL
    }

    def _Accepted(self, host, port):
        # Internal: hand the C++ socket of whatever OnAccepted produced back
        # to the core (None when OnAccepted did not return a python socket).
        return getattr(self.OnAccepted(host, port), '_csock', None)

    def GetModule(self):
        """Return the python module object that owns this socket."""
        return AsPyModule(self._csock.GetModule()).GetNewPyObj()

    def Listen(self, addrtype='all', port=None, bindhost='', ssl=False,
               maxconns=GetSOMAXCONN(), timeout=0):
        """Start listening and return the bound port, or 0 on failure.

        When *port* is None a random port is chosen (ListenRand).  Raises
        ValueError for an unknown *addrtype*.
        NOTE(review): the GetSOMAXCONN() default is evaluated once, at
        class-definition time — confirm that is intentional.
        """
        try:
            addr = self.ADDR_MAP[addrtype.lower()]
        except KeyError:
            raise ValueError(
                "Specified addrtype [{0}] isn't supported".format(addrtype))
        # Positional order must match the socket manager's listen API.
        args = (
            "python socket for {0}".format(self.GetModule()),
            bindhost,
            ssl,
            maxconns,
            self._csock,
            timeout,
            addr
        )
        if port is None:
            return self.GetModule().GetManager().ListenRand(*args)
        if self.GetModule().GetManager().ListenHost(port, *args):
            return port
        return 0

    def Connect(self, host, port, timeout=60, ssl=False, bindhost=''):
        """Open an outgoing connection through the module's socket manager."""
        return self.GetModule().GetManager().Connect(
            host,
            port,
            'python conn socket for {0}'.format(self.GetModule()),
            timeout,
            ssl,
            bindhost,
            self._csock
        )

    def Write(self, data):
        """Send *data* (str only) over the socket; TypeError otherwise."""
        if (isinstance(data, str)):
            return self._csock.Write(data)
        raise TypeError(
            'socket.Write needs str. If you want binary data, use WriteBytes')

    # --- Overridable hooks; all default to doing nothing. ---

    def Init(self, *a, **b):
        # Called by Module.CreateSocket with the user-supplied arguments.
        pass

    def OnConnected(self):
        pass

    def OnDisconnected(self):
        pass

    def OnTimeout(self):
        pass

    def OnConnectionRefused(self):
        pass

    def OnReadData(self, bytess):
        pass

    def OnReadLine(self, line):
        pass

    def OnAccepted(self, host, port):
        pass

    def OnShutdown(self):
        pass
class Timer:
    """Base class for python timers created via ``Module.CreateTimer``.

    The wrapped C++ timer is stored on ``self._ctimer`` by CreateTimer.
    Subclasses override ``RunJob`` (fires every interval) and
    ``OnShutdown``; both default to no-ops.
    """

    def GetModule(self):
        """Return the python module object that owns this timer."""
        return AsPyModule(self._ctimer.GetModule()).GetNewPyObj()

    def RunJob(self):
        # Hook: called each time the timer fires.
        pass

    def OnShutdown(self):
        # Hook: called when the timer is being destroyed.
        pass
class ModuleNVIter(collections.abc.Iterator):
    """Iterator over the keys of a module's persistent NV registry."""

    def __init__(self, cmod):
        self._cmod = cmod
        self.it = cmod.BeginNV_()

    def __next__(self):
        cursor = self.it
        if cursor.is_end(self._cmod):
            raise StopIteration
        current = cursor.get()
        cursor.plusplus()
        return current
class ModuleNV(collections.abc.MutableMapping):
    """Dict-like facade over a module's persistent NV (name/value) storage.

    Reads and writes go straight to the C++ module's SetNV/GetNV registry,
    so values survive module reloads.  len() is unsupported because the
    underlying registry exposes no size.
    """
    def __init__(self, cmod):
        self._cmod = cmod
    def __setitem__(self, key, value):
        self._cmod.SetNV(key, value)
    def __getitem__(self, key):
        if not self._cmod.ExistsNV(key):
            raise KeyError(key)
        return self._cmod.GetNV(key)
    def __contains__(self, key):
        # Direct existence check; avoids MutableMapping's __getitem__ probe.
        return self._cmod.ExistsNV(key)
    def __delitem__(self, key):
        self._cmod.DelNV(key)
    def keys(self):
        return ModuleNVIter(self._cmod)
    __iter__ = keys
    def __len__(self):
        # BUG FIX: the original did `raise NotImplemented`, which in Python 3
        # produces "TypeError: exceptions must derive from BaseException"
        # instead of the intended exception type.
        raise NotImplementedError
class Module:
    """Base class for python-side ZNC modules.

    Subclasses override whichever On*() hooks they need; every hook below is
    a no-op (returns None) by default, which the core treats as CONTINUE.
    The C++ twin (CPyModule) is attached as self._cmod by load_module(),
    along with self.nv (a ModuleNV) for persistent storage.
    """
    # Metadata shown by listmods/webadmin; override in subclasses.
    description = '< Placeholder for a description >'
    module_types = [CModInfo.NetworkModule]
    wiki_page = ''
    has_args = False
    args_help_text = ''
    def __str__(self):
        return self.GetModName()
    @classmethod
    def t_s(cls, english, context=''):
        """Translate a singular string via this module's gettext domain."""
        domain = 'znc-' + cls.__name__
        return CTranslation.Get().Singular(domain, context, english)
    @classmethod
    def t_f(cls, english, context=''):
        """Translate, then return the translated string's .format method."""
        fmt = cls.t_s(english, context)
        # Returning bound method
        return fmt.format
    @classmethod
    def t_p(cls, english, englishes, num, context=''):
        """Plural-aware translation; returns the .format bound method."""
        domain = 'znc-' + cls.__name__
        fmt = CTranslation.Get().Plural(domain, context, english, englishes,
                                        num)
        return fmt.format
    # TODO is "t_d" needed for python? Maybe after AddCommand is implemented
    def OnLoad(self, sArgs, sMessage):
        # Return False (optionally setting sMessage.s) to abort loading.
        return True
    def _GetSubPages(self):
        # Bridge for the C++ side; simply forwards to the public hook.
        return self.GetSubPages()
    def CreateSocket(self, socketclass=Socket, *the, **rest):
        """Create a python Socket owned by this module.

        Extra positional/keyword args are passed to the socket's Init().
        """
        socket = socketclass()
        socket._csock = CreatePySocket(self._cmod, socket)
        socket.Init(*the, **rest)
        return socket
    def CreateTimer(self, timer, interval=10, cycles=1, label='pytimer',
                    description='Some python timer'):
        """Create and register a python Timer (class *timer*) for this module."""
        t = timer()
        t._ctimer = CreatePyTimer(self._cmod, interval, cycles, label,
                                  description, t)
        return t
    # --- Overridable hooks: all default to "do nothing" / CONTINUE ---------
    def GetSubPages(self):
        pass
    def OnShutdown(self):
        pass
    def OnBoot(self):
        pass
    def WebRequiresLogin(self):
        pass
    def WebRequiresAdmin(self):
        pass
    def GetWebMenuTitle(self):
        pass
    def OnWebPreRequest(self, WebSock, sPageName):
        pass
    def OnWebRequest(self, WebSock, sPageName, Tmpl):
        pass
    def OnPreRehash(self):
        pass
    def OnPostRehash(self):
        pass
    def OnIRCDisconnected(self):
        pass
    def OnIRCConnected(self):
        pass
    def OnIRCConnecting(self, IRCSock):
        pass
    def OnIRCConnectionError(self, IRCSock):
        pass
    def OnIRCRegistration(self, sPass, sNick, sIdent, sRealName):
        pass
    def OnBroadcast(self, sMessage):
        pass
    def OnChanPermission(self, OpNick, Nick, Channel, uMode, bAdded,
                         bNoChange):
        pass
    def OnOp(self, OpNick, Nick, Channel, bNoChange):
        pass
    def OnDeop(self, OpNick, Nick, Channel, bNoChange):
        pass
    def OnVoice(self, OpNick, Nick, Channel, bNoChange):
        pass
    def OnDevoice(self, OpNick, Nick, Channel, bNoChange):
        pass
    def OnMode(self, OpNick, Channel, uMode, sArg, bAdded, bNoChange):
        pass
    def OnRawMode(self, OpNick, Channel, sModes, sArgs):
        pass
    def OnRaw(self, sLine):
        pass
    def OnStatusCommand(self, sCommand):
        pass
    def OnModCommand(self, sCommand):
        pass
    def OnModNotice(self, sMessage):
        pass
    def OnModCTCP(self, sMessage):
        pass
    def OnQuit(self, Nick, sMessage, vChans):
        pass
    def OnNick(self, Nick, sNewNick, vChans):
        pass
    def OnKick(self, OpNick, sKickedNick, Channel, sMessage):
        pass
    def OnJoining(self, Channel):
        pass
    def OnJoin(self, Nick, Channel):
        pass
    def OnPart(self, Nick, Channel, sMessage=None):
        pass
    def OnInvite(self, Nick, sChan):
        pass
    def OnChanBufferStarting(self, Chan, Client):
        pass
    def OnChanBufferEnding(self, Chan, Client):
        pass
    def OnChanBufferPlayLine(self, Chan, Client, sLine):
        pass
    def OnPrivBufferPlayLine(self, Client, sLine):
        pass
    def OnClientLogin(self):
        pass
    def OnClientDisconnect(self):
        pass
    def OnUserRaw(self, sLine):
        pass
    def OnUserCTCPReply(self, sTarget, sMessage):
        pass
    def OnUserCTCP(self, sTarget, sMessage):
        pass
    def OnUserAction(self, sTarget, sMessage):
        pass
    def OnUserMsg(self, sTarget, sMessage):
        pass
    def OnUserNotice(self, sTarget, sMessage):
        pass
    def OnUserJoin(self, sChannel, sKey):
        pass
    def OnUserPart(self, sChannel, sMessage):
        pass
    def OnUserTopic(self, sChannel, sTopic):
        pass
    def OnUserTopicRequest(self, sChannel):
        pass
    def OnUserQuit(self, sMessage):
        pass
    def OnCTCPReply(self, Nick, sMessage):
        pass
    def OnPrivCTCP(self, Nick, sMessage):
        pass
    def OnChanCTCP(self, Nick, Channel, sMessage):
        pass
    def OnPrivAction(self, Nick, sMessage):
        pass
    def OnChanAction(self, Nick, Channel, sMessage):
        pass
    def OnPrivMsg(self, Nick, sMessage):
        pass
    def OnChanMsg(self, Nick, Channel, sMessage):
        pass
    def OnPrivNotice(self, Nick, sMessage):
        pass
    def OnChanNotice(self, Nick, Channel, sMessage):
        pass
    def OnTopic(self, Nick, Channel, sTopic):
        pass
    def OnServerCapAvailable(self, sCap):
        pass
    def OnServerCapResult(self, sCap, bSuccess):
        pass
    def OnTimerAutoJoin(self, Channel):
        pass
    def OnEmbeddedWebRequest(self, WebSock, sPageName, Tmpl):
        pass
    def OnAddNetwork(self, Network, sErrorRet):
        pass
    def OnDeleteNetwork(self, Network):
        pass
    def OnSendToClient(self, sLine, Client):
        pass
    def OnSendToIRC(self, sLine):
        pass
    # Global modules
    def OnAddUser(self, User, sErrorRet):
        pass
    def OnDeleteUser(self, User):
        pass
    def OnClientConnect(self, pSock, sHost, uPort):
        pass
    def OnLoginAttempt(self, Auth):
        pass
    def OnFailedLogin(self, sUsername, sRemoteIP):
        pass
    def OnUnknownUserRaw(self, pClient, sLine):
        pass
    def OnClientCapLs(self, pClient, ssCaps):
        pass
    def IsClientCapSupported(self, pClient, sCap, bState):
        pass
    def OnClientCapRequest(self, pClient, sCap, bState):
        pass
    def OnModuleLoading(self, sModName, sArgs, eType, bSuccess, sRetMsg):
        pass
    def OnModuleUnloading(self, pModule, bSuccess, sRetMsg):
        pass
    def OnGetModInfo(self, ModInfo, sModule, bSuccess, sRetMsg):
        pass
    def OnGetAvailableMods(self, ssMods, eType):
        pass
    # In python None is allowed value, so python modules may continue using OnMode and not OnMode2
    def OnChanPermission2(self, OpNick, Nick, Channel, uMode, bAdded, bNoChange):
        return self.OnChanPermission(OpNick, Nick, Channel, uMode, bAdded, bNoChange)
    def OnOp2(self, OpNick, Nick, Channel, bNoChange):
        return self.OnOp(OpNick, Nick, Channel, bNoChange)
    def OnDeop2(self, OpNick, Nick, Channel, bNoChange):
        return self.OnDeop(OpNick, Nick, Channel, bNoChange)
    def OnVoice2(self, OpNick, Nick, Channel, bNoChange):
        return self.OnVoice(OpNick, Nick, Channel, bNoChange)
    def OnDevoice2(self, OpNick, Nick, Channel, bNoChange):
        return self.OnDevoice(OpNick, Nick, Channel, bNoChange)
    def OnMode2(self, OpNick, Channel, uMode, sArg, bAdded, bNoChange):
        return self.OnMode(OpNick, Channel, uMode, sArg, bAdded, bNoChange)
    def OnRawMode2(self, OpNick, Channel, sModes, sArgs):
        return self.OnRawMode(OpNick, Channel, sModes, sArgs)
    def OnRawMessage(self, msg):
        pass
    def OnNumericMessage(self, msg):
        pass
    # Deprecated non-Message functions should still work, for now.
    # Each *Message wrapper unpacks the CMessage into mutable String holders,
    # calls the legacy hook, and writes any modifications back into msg.
    def OnQuitMessage(self, msg, vChans):
        return self.OnQuit(msg.GetNick(), msg.GetReason(), vChans)
    def OnNickMessage(self, msg, vChans):
        return self.OnNick(msg.GetNick(), msg.GetNewNick(), vChans)
    def OnKickMessage(self, msg):
        return self.OnKick(msg.GetNick(), msg.GetKickedNick(), msg.GetChan(), msg.GetReason())
    def OnJoinMessage(self, msg):
        return self.OnJoin(msg.GetNick(), msg.GetChan())
    def OnPartMessage(self, msg):
        return self.OnPart(msg.GetNick(), msg.GetChan(), msg.GetReason())
    def OnChanBufferPlayMessage(self, msg):
        modified = String()
        old = modified.s = msg.ToString(CMessage.ExcludeTags)
        ret = self.OnChanBufferPlayLine(msg.GetChan(), msg.GetClient(), modified)
        # Re-parse only when the legacy hook actually edited the line.
        if old != modified.s:
            msg.Parse(modified.s)
        return ret
    def OnPrivBufferPlayMessage(self, msg):
        modified = String()
        old = modified.s = msg.ToString(CMessage.ExcludeTags)
        ret = self.OnPrivBufferPlayLine(msg.GetClient(), modified)
        if old != modified.s:
            msg.Parse(modified.s)
        return ret
    def OnUserRawMessage(self, msg):
        pass
    def OnUserCTCPReplyMessage(self, msg):
        target = String(msg.GetTarget())
        text = String(msg.GetText())
        ret = self.OnUserCTCPReply(target, text)
        msg.SetTarget(target.s)
        msg.SetText(text.s)
        return ret
    def OnUserCTCPMessage(self, msg):
        target = String(msg.GetTarget())
        text = String(msg.GetText())
        ret = self.OnUserCTCP(target, text)
        msg.SetTarget(target.s)
        msg.SetText(text.s)
        return ret
    def OnUserActionMessage(self, msg):
        target = String(msg.GetTarget())
        text = String(msg.GetText())
        ret = self.OnUserAction(target, text)
        msg.SetTarget(target.s)
        msg.SetText(text.s)
        return ret
    def OnUserTextMessage(self, msg):
        target = String(msg.GetTarget())
        text = String(msg.GetText())
        ret = self.OnUserMsg(target, text)
        msg.SetTarget(target.s)
        msg.SetText(text.s)
        return ret
    def OnUserNoticeMessage(self, msg):
        target = String(msg.GetTarget())
        text = String(msg.GetText())
        ret = self.OnUserNotice(target, text)
        msg.SetTarget(target.s)
        msg.SetText(text.s)
        return ret
    def OnUserJoinMessage(self, msg):
        chan = String(msg.GetTarget())
        key = String(msg.GetKey())
        ret = self.OnUserJoin(chan, key)
        msg.SetTarget(chan.s)
        msg.SetKey(key.s)
        return ret
    def OnUserPartMessage(self, msg):
        chan = String(msg.GetTarget())
        reason = String(msg.GetReason())
        ret = self.OnUserPart(chan, reason)
        msg.SetTarget(chan.s)
        msg.SetReason(reason.s)
        return ret
    def OnUserTopicMessage(self, msg):
        chan = String(msg.GetTarget())
        topic = String(msg.GetTopic())
        ret = self.OnUserTopic(chan, topic)
        msg.SetTarget(chan.s)
        msg.SetTopic(topic.s)
        return ret
    def OnUserQuitMessage(self, msg):
        reason = String(msg.GetReason())
        ret = self.OnUserQuit(reason)
        msg.SetReason(reason.s)
        return ret
    def OnCTCPReplyMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnCTCPReply(msg.GetNick(), text)
        msg.SetText(text.s)
        return ret
    def OnPrivCTCPMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnPrivCTCP(msg.GetNick(), text)
        msg.SetText(text.s)
        return ret
    def OnChanCTCPMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnChanCTCP(msg.GetNick(), msg.GetChan(), text)
        msg.SetText(text.s)
        return ret
    def OnPrivActionMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnPrivAction(msg.GetNick(), text)
        msg.SetText(text.s)
        return ret
    def OnChanActionMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnChanAction(msg.GetNick(), msg.GetChan(), text)
        msg.SetText(text.s)
        return ret
    def OnPrivTextMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnPrivMsg(msg.GetNick(), text)
        msg.SetText(text.s)
        return ret
    def OnChanTextMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnChanMsg(msg.GetNick(), msg.GetChan(), text)
        msg.SetText(text.s)
        return ret
    def OnPrivNoticeMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnPrivNotice(msg.GetNick(), text)
        msg.SetText(text.s)
        return ret
    def OnChanNoticeMessage(self, msg):
        text = String(msg.GetText())
        ret = self.OnChanNotice(msg.GetNick(), msg.GetChan(), text)
        msg.SetText(text.s)
        return ret
    def OnTopicMessage(self, msg):
        topic = String(msg.GetTopic())
        ret = self.OnTopic(msg.GetNick(), msg.GetChan(), topic)
        msg.SetTopic(topic.s)
        return ret
    def OnUnknownUserRawMessage(self, msg):
        pass
    def OnSendToClientMessage(self, msg):
        pass
    def OnSendToIRCMessage(self, msg):
        pass
def make_inherit(cl, parent, attr):
    """Graft the public API of *parent* (walking its single-inheritance
    chain) onto class *cl*, delegating each call to the wrapped object
    stored in the instance attribute named *attr*."""
    def delegate(owner, method_name, member):
        # Late-binding closure fix: capture owner/method_name per method.
        return lambda self, *args: owner.__dict__[method_name](
            self.__dict__[member], *args)
    current = parent
    while current is not None:
        for name in current.__dict__:
            # Skip private names and anything already defined (a more
            # derived class in the chain wins over its bases).
            if name.startswith('_') or name in cl.__dict__:
                continue
            setattr(cl, name, delegate(current, name, attr))
        # Multiple inheritance is not supported (yet?)
        current = current.__bases__[0] if current.__bases__ else None
# Graft the public CPySocket/CPyModule/CPyTimer APIs onto the python base
# classes; every call is forwarded to the wrapped C++ twin object.
make_inherit(Socket, CPySocket, '_csock')
make_inherit(Module, CPyModule, '_cmod')
make_inherit(Timer, CPyTimer, '_ctimer')
class ZNCModuleLoader(importlib.abc.SourceLoader):
    """Loads a ZNC python module from its source file on disk."""
    def __init__(self, modname, pypath):
        # modname is accepted for FileFinder compatibility; the path alone
        # identifies the source file.
        self.pypath = pypath
    def create_module(self, spec):
        # Stash the per-module state prepared by ZNCModuleFinder.find_spec().
        self._datadir, self._package_dir = spec.loader_state
        return super().create_module(spec)
    def get_data(self, path):
        with open(path, 'rb') as source:
            return source.read()
    def get_filename(self, fullname):
        return self.pypath
class ZNCModuleFinder(importlib.abc.MetaPathFinder):
    """Meta-path finder that maps the virtual "znc_modules" package onto the
    directories returned by CModules.GetModDirs()."""
    @staticmethod
    def find_spec(fullname, path, target=None):
        # "znc_modules" itself is a purely virtual namespace package.
        if fullname == 'znc_modules':
            spec = importlib.util.spec_from_loader(fullname, None, is_package=True)
            return spec
        parts = fullname.split('.')
        if parts[0] != 'znc_modules':
            return
        def dirs():
            # Yields (libdir, datadir) pairs to search.
            if len(parts) == 2:
                # common case
                yield from CModules.GetModDirs()
            else:
                # the module is a package and tries to load a submodule of it
                for libdir in sys.modules['znc_modules.' + parts[1]].__loader__._package_dir:
                    yield libdir, None
        for libdir, datadir in dirs():
            finder = importlib.machinery.FileFinder(libdir,
                (ZNCModuleLoader, importlib.machinery.SOURCE_SUFFIXES))
            spec = finder.find_spec('.'.join(parts[1:]))
            if spec:
                spec.name = fullname
                spec.loader_state = (datadir, spec.submodule_search_locations)
                # It almost works with original submodule_search_locations,
                # then python will find submodules of the package itself,
                # without calling out to ZNCModuleFinder or ZNCModuleLoader.
                # But updatemod will be flaky for those submodules because as
                # of py3.8 importlib.invalidate_caches() goes only through
                # sys.meta_path, but not sys.path_hooks. So we make them load
                # through ZNCModuleFinder too, but still remember the original
                # dir so that the whole module comes from a single entry in
                # CModules.GetModDirs().
                spec.submodule_search_locations = []
                return spec
# Register the finder so "import znc_modules.<name>" works, and keep a set of
# every currently loaded python module instance.
sys.meta_path.append(ZNCModuleFinder())
_py_modules = set()
def find_open(modname):
    '''Returns (pymodule, datapath)'''
    # (None, None) is returned when the module can't be imported or was not
    # loaded through our loader.
    fullname = 'znc_modules.' + modname
    for m in _py_modules:
        if m.GetModName() == modname:
            break
    else:
        # module is not loaded, clean up previous attempts to load it or even
        # to list as available modules
        # This is to let updatemod work
        to_remove = []
        for m in sys.modules:
            if m == fullname or m.startswith(fullname + '.'):
                to_remove.append(m)
        for m in to_remove:
            del sys.modules[m]
    try:
        module = importlib.import_module(fullname)
    except ImportError:
        return (None, None)
    if not isinstance(module.__loader__, ZNCModuleLoader):
        # If modname/ is a directory, it was "loaded" using _NamespaceLoader.
        # This is the case for e.g. modperl.
        # https://github.com/znc/znc/issues/1757
        return (None, None)
    return (module, os.path.join(module.__loader__._datadir, modname))
def load_module(modname, args, module_type, user, network, retmsg, modpython):
    '''Returns 0 if not found, 1 on loading error, 2 on success'''
    # Reject names that couldn't be valid python identifiers / class names.
    if re.search(r'[^a-zA-Z0-9_]', modname) is not None:
        retmsg.s = 'Module names can only contain letters, numbers and ' \
                   'underscores, [{0}] is invalid.'.format(modname)
        return 1
    pymodule, datapath = find_open(modname)
    if pymodule is None:
        return 0
    # The module file must define a class with the same name as the module.
    if modname not in pymodule.__dict__:
        retmsg.s = "Python module [{0}] doesn't have class named [{1}]".format(
            pymodule.__file__, modname)
        return 1
    cl = pymodule.__dict__[modname]
    if module_type not in cl.module_types:
        retmsg.s = "Module [{}] doesn't support type.".format(modname)
        return 1
    # Instantiate and wire up the C++ twin plus persistent NV storage.
    module = cl()
    module._cmod = CreatePyModule(user, network, modname, datapath, module_type, module, modpython)
    module.nv = ModuleNV(module._cmod)
    module.SetDescription(cl.description)
    module.SetArgs(args)
    module.SetModPath(pymodule.__file__)
    _py_modules.add(module)
    # Pick the container (user / network / global) that will own the module;
    # unload on any mismatch so we don't leak the half-constructed module.
    if module_type == CModInfo.UserModule:
        if not user:
            retmsg.s = "Module [{}] is UserModule and needs user.".format(modname)
            unload_module(module)
            return 1
        cont = user
    elif module_type == CModInfo.NetworkModule:
        if not network:
            retmsg.s = "Module [{}] is Network module and needs a network.".format(modname)
            unload_module(module)
            return 1
        cont = network
    elif module_type == CModInfo.GlobalModule:
        cont = CZNC.Get()
    else:
        retmsg.s = "Module [{}] doesn't support that module type.".format(modname)
        unload_module(module)
        return 1
    cont.GetModules().append(module._cmod)
    try:
        loaded = True
        if not module.OnLoad(args, retmsg):
            if retmsg.s == '':
                retmsg.s = 'Module [{0}] aborted.'.format(modname)
            else:
                retmsg.s = 'Module [{0}] aborted: {1}'.format(modname,
                                                              retmsg.s)
            loaded = False
    except BaseException:
        if retmsg.s == '':
            retmsg.s = 'Got exception: {0}'.format(traceback.format_exc())
        else:
            retmsg.s = '{0}; Got exception: {1}'.format(retmsg.s,
                                                        traceback.format_exc())
        loaded = False
    # NOTE(review): this bare except is unreachable in Python 3 — everything
    # derives from BaseException, which is caught above. Kept for history.
    except:
        if retmsg.s == '':
            retmsg.s = 'Got exception.'
        else:
            retmsg.s = '{0}; Got exception.'.format(retmsg.s)
        loaded = False
    if loaded:
        if retmsg.s == '':
            retmsg.s = "[{0}]".format(pymodule.__file__)
        else:
            retmsg.s = "[{1}] [{0}]".format(pymodule.__file__,
                                            retmsg.s)
        return 2
    print(retmsg.s)
    unload_module(module)
    return 1
def unload_module(module):
    """Detach *module* from its container and destroy its C++ twin.

    Returns False if the module is not one of ours, True otherwise.
    """
    if (module not in _py_modules):
        return False
    module.OnShutdown()
    _py_modules.discard(module)
    cmod = module._cmod
    if module.GetType() == CModInfo.UserModule:
        cont = cmod.GetUser()
    elif module.GetType() == CModInfo.NetworkModule:
        cont = cmod.GetNetwork()
    elif module.GetType() == CModInfo.GlobalModule:
        cont = CZNC.Get()
    # NOTE(review): an unexpected GetType() would leave `cont` unbound and
    # raise NameError below; load_module() currently prevents that case.
    cont.GetModules().removeModule(cmod)
    del module._cmod
    cmod.DeletePyModule()
    del cmod
    return True
def unload_all():
    """Unload every loaded python module, then stop coverage if enabled."""
    while len(_py_modules) > 0:
        mod = _py_modules.pop()
        # add it back to set, otherwise unload_module will be sad
        _py_modules.add(mod)
        unload_module(mod)
    # BUG FIX: "_cov" is not defined anywhere in this file (and a star import
    # never provides underscore-prefixed names), so referencing it directly
    # raised NameError at shutdown. Look it up defensively instead; behavior
    # is unchanged when a coverage hook does define it.
    cov = globals().get('_cov')
    if cov:
        cov.stop()
def gather_mod_info(cl, modinfo):
    """Copy the class-level metadata of module class *cl* into *modinfo*."""
    # Keep the translation domain alive while the metadata is copied.
    translation = CTranslationDomainRefHolder("znc-" + modinfo.GetName())
    modinfo.SetDescription(cl.description)
    modinfo.SetWikiPage(cl.wiki_page)
    modinfo.SetDefaultType(cl.module_types[0])
    modinfo.SetArgsHelpText(cl.args_help_text)
    modinfo.SetHasArgs(cl.has_args)
    for supported_type in cl.module_types:
        modinfo.AddType(supported_type)
def get_mod_info(modname, retmsg, modinfo):
    '''0-not found, 1-error, 2-success'''
    pymodule, datadir = find_open(modname)
    if pymodule is None:
        return 0
    try:
        cl = pymodule.__dict__[modname]
    except KeyError:
        retmsg.s = "Python module [{0}] doesn't have class named [{1}]".format(
            pymodule.__file__, modname)
        return 1
    modinfo.SetName(modname)
    modinfo.SetPath(pymodule.__file__)
    gather_mod_info(cl, modinfo)
    return 2
def get_mod_info_path(path, modname, modinfo):
    """Load *modname* from directory *path* and fill *modinfo* from it.

    Returns 0 when the module cannot be found or imported, 1 on success
    (note: success is 1 here, unlike get_mod_info() which returns 2).
    """
    # BUG FIX: "imp" was never imported at file level, so this function
    # raised NameError on every call. Import it lazily; "imp" was removed in
    # Python 3.12, in which case we report "not found" instead of crashing.
    try:
        import imp
    except ImportError:
        return 0
    try:
        x = imp.find_module(modname, [path])
    except ImportError:
        return 0
    # x == (<open file './modules/admin.so', mode 'rb' at 0x7fa2dc748d20>,
    #       './modules/admin.so', ('.so', 'rb', 3))
    # x == (<open file './modules/pythontest.py', mode 'U' at 0x7fa2dc748d20>,
    #       './modules/pythontest.py', ('.py', 'U', 1))
    if x[0] is None and x[2][2] != imp.PKG_DIRECTORY:
        return 0
    try:
        pymodule = imp.load_module(modname, *x)
    except ImportError:
        return 0
    finally:
        # find_module opened the file; always close it.
        if x[0]:
            x[0].close()
    if modname not in pymodule.__dict__:
        return 0
    cl = pymodule.__dict__[modname]
    modinfo.SetName(modname)
    modinfo.SetPath(pymodule.__file__)
    gather_mod_info(cl, modinfo)
    return 1
# Re-export CModule's hook return codes at module level for convenience.
CONTINUE = CModule.CONTINUE
HALT = CModule.HALT
HALTMODS = CModule.HALTMODS
HALTCORE = CModule.HALTCORE
UNLOAD = CModule.UNLOAD
# Build-time feature flags and version info, resolved once at import time.
HaveSSL = HaveSSL_()
HaveIPv6 = HaveIPv6_()
HaveCharset = HaveCharset_()
Version = GetVersion()
VersionMajor = GetVersionMajor()
VersionMinor = GetVersionMinor()
VersionExtra = GetVersionExtra()
def CreateWebSubPage(name, title='', params=None, admin=False):
    """Build a CWebSubPage for a module's webadmin integration.

    :param name: page name (URL component)
    :param title: human-readable page title
    :param params: optional dict of template parameters
    :param admin: when True, the page is flagged admin-only
    """
    # BUG FIX: the default used to be the mutable `params=dict()`, a single
    # dict object shared by every call. Use the None sentinel instead;
    # behavior is unchanged for all callers.
    if params is None:
        params = {}
    vpair = VPair()
    for k, v in params.items():
        VPair_Add2Str_(vpair, k, v)
    flags = 0
    if admin:
        flags |= CWebSubPage.F_ADMIN
    return CreateWebSubPage_(name, title, vpair, flags)
# Swap in the SWIG helper variants that return python-friendly containers.
CUser.GetNetworks = CUser.GetNetworks_
CIRCNetwork.GetChans = CIRCNetwork.GetChans_
CIRCNetwork.GetServers = CIRCNetwork.GetServers_
CIRCNetwork.GetQueries = CIRCNetwork.GetQueries_
CChan.GetNicks = CChan.GetNicks_
CZNC.GetUserMap = CZNC.GetUserMap_
def FreeOwnership(func):
    """
    Force release of python ownership of user object when adding it to znc
    This solves #462
    """
    @wraps(func)
    def _wrap(self, obj, *args):
        # Non-SWIG first arguments (e.g. plain str) pass straight through.
        if not hasattr(obj, 'thisown'):
            return func(self, obj, *args)
        if not func(self, obj, *args):
            return False
        # Success: hand ownership of the C++ object over to the ZNC core.
        # .thisown is SWIG's magic flag that makes python's garbage collector
        # call C++ "delete" when the wrapper dies; clearing it prevents that.
        obj.thisown = 0
        return True
    return _wrap
# Wrap every "add object to ZNC" entry point so successful adds transfer
# C++ object ownership from python/SWIG to the core (see FreeOwnership).
CZNC.AddListener = FreeOwnership(func=CZNC.AddListener)
CZNC.AddUser = FreeOwnership(func=CZNC.AddUser)
CZNC.AddNetworkToQueue = FreeOwnership(func=CZNC.AddNetworkToQueue)
CUser.AddNetwork = FreeOwnership(func=CUser.AddNetwork)
CIRCNetwork.AddChan = FreeOwnership(func=CIRCNetwork.AddChan)
CModule.AddSocket = FreeOwnership(func=CModule.AddSocket)
CModule.AddSubPage = FreeOwnership(func=CModule.AddSubPage)
class ModulesIter(collections.abc.Iterator):
    """Python iterator wrapped around a C++ CModulesIter cursor."""
    def __init__(self, cmod):
        self._cmod = cmod
    def __next__(self):
        cursor = self._cmod
        if cursor.is_end():
            raise StopIteration
        current = cursor.get()
        cursor.plusplus()
        return current
# Let "for mod in some_cmodules:" iterate the C++ module container.
CModules.__iter__ = lambda cmod: ModulesIter(CModulesIter(cmod))
# e.g. msg.As(znc.CNumericMessage)
def _CMessage_As(self, cl):
return getattr(self, 'As_' + cl.__name__, lambda: self)()
CMessage.As = _CMessage_As
def str_eq(self, other):
    """Equality by string form; identical objects always compare equal."""
    return str(other) == str(self) or self is other
# Make the ZNC wrapper types compare equal by their string form, so python
# code can compare them directly against plain strings (e.g. nick names).
CChan.__eq__ = str_eq
CNick.__eq__ = str_eq
CUser.__eq__ = str_eq
CIRCNetwork.__eq__ = str_eq
CPyRetString.__eq__ = str_eq
| |
import time
import logging
import calendar
from datetime import datetime
from threading import Thread
from collections import namedtuple
from server.models import Device, DeviceConfiguration, Configuration, Sensor, SensorValue
from server.devices import get_user_function, execute_user_function
from server.functions import get_configuration, parse_value
from server.helpers_thread import write_pidfile_or_fail
from server.forecasting.measurementstorage import MeasurementStorage
from server.devices.base import BaseEnvironment
from server.forecasting.simulation.devices.producers import SimulatedCogenerationUnit, SimulatedPeakLoadBoiler
from server.forecasting.simulation.devices.storages import SimulatedHeatStorage, SimulatedPowerMeter
from server.forecasting.simulation.devices.consumers import SimulatedThermalConsumer, SimulatedElectricalConsumer
from server.forecasting.optimizing.auto_optimization import auto_optimize
# Default forecast horizon: 14 days, simulated in 15-minute steps (seconds).
DEFAULT_FORECAST_INTERVAL = 14 * 24 * 3600.0
DEFAULT_FORECAST_STEP_SIZE = 15 * 60.0
logger = logging.getLogger('simulation')
""" Return the result of a forecast.
For short-lived forecasts, call this. It will create a :class:`Forecast`
and block, until the forecast is finished.
For parameters see :class:`Forecast`"""
def get_forecast(initial_time, configurations=None, code=None, forward=None):
    """Run a forecast synchronously and return its result dict.

    Convenience wrapper for short-lived forecasts: builds a :class:`Forecast`,
    runs it in the calling thread (it is deliberately not start()ed), and
    blocks until the result is available. For parameters see :class:`Forecast`.
    """
    forecast = Forecast(initial_time, configurations, code=code,
                        forecast=True, forward=forward)
    # run() blocks until finished; get() then returns the completed result.
    return forecast.run().get()
def get_initialized_scenario(env, configurations):
    """ this function returns an initialized scenario.
    It creates new simulated devices and connects the right devices.
    The devices are restored to the latest state of the |SensorValue|'s in the db,
    if there are no Values, a warning will be logged and the standard values are used.
    :param env: |env| for all Devices
    :param list configurations: the device configurations, which to set in the devices.
        These are typically |DeviceConfiguration| objects.
    :returns: a :py:class:`namedtuple` of devices, with the acronym (f.e plb), as key
    """
    devices = list(Device.objects.all())
    device_list = []
    # Map each db Device onto its Simulated* class (resolved by name from
    # this module's globals, e.g. "HeatStorage" -> SimulatedHeatStorage).
    for device in devices:
        for device_type, class_name in Device.DEVICE_TYPES:
            if device.device_type == device_type:
                device_class = globals()['Simulated%s' % class_name]
                device_list.append(device_class(device.id, env))
    for device in device_list:
        # connect power devices
        device.attach_dependent_devices_in(device_list)
        if not device.connected():
            logger.error(
                "Simulation: Device %s is not connected" % device.name)
            raise RuntimeError
        # configure devices
        # Only keys the device already declares in .config are applied.
        for configuration in configurations:
            if configuration.device_id == device.id:
                value = parse_value(configuration)
                if configuration.key in device.config:
                    device.config[configuration.key] = value
        # load latest sensor values
        # Each sensor's newest db value is pushed back into the device,
        # either through a setter callable or by plain attribute assignment.
        try:
            for sensor in Sensor.objects.filter(device_id=device.id):
                value = SensorValue.objects.filter(
                    sensor=sensor).latest('timestamp').value
                if sensor.setter != '':
                    callback = getattr(device, sensor.setter, None)
                    if callback is not None:
                        if hasattr(callback, '__call__'):
                            callback(value)
                        else:
                            setattr(device, sensor.setter, value)
        except SensorValue.DoesNotExist:
            logger.warning("Simulation: No sensor values \
                found for sensor '%s' at device '%s'"
                           % (sensor.name, sensor.device.name))
        except Sensor.DoesNotExist:
            logger.warning(
                'Could not find any sensor values to configure simulation')
        # re-calculate values
        device.calculate()
    # create high performance tuple with device acronyms as field names
    device_tuple = namedtuple("Devices", [dev.acronym for dev in device_list])(*device_list)
    return device_tuple
class ForecastQueue():
    """ A container, holding the running forecasts. Each forecast gets an id.
    Usage::
        q = ForecastQueue()
        f_id = q.schedule_new(initial_time=time.time())
        #... do other stuff, then retrieve forecast
        result = q.get_by_id(f_id)
    """
    def __init__(self):
        # BUG FIX: these used to be *class* attributes, so every
        # ForecastQueue instance (and the class itself) shared a single
        # forecast list. Make the state per-instance.
        self.forecasts = []
        self.id = 0

    def schedule_new(self, initial_time, **kwargs):
        """ start a new forecast (in its own thread) and return its id.
        :param dict kwargs: the parameters for the :class:`Forecast`
        """
        self.id += 1
        forecast = Forecast(initial_time, **kwargs)
        self.forecasts.append((self.id, forecast))
        forecast.start()
        return self.id

    def get_by_id(self, forecast_id):
        """ get a forecast result by its id.
        Returns ``None`` while the forecast is still running (and for
        unknown ids). A finished result is returned exactly once and then
        removed from the queue.
        """
        for index, (_id, forecast) in enumerate(self.forecasts):
            if _id == forecast_id:
                result = forecast.get()
                if result is not None:
                    del self.forecasts[index]
                return result
class Forecast(Thread):
    """ Setup a Forecast Object. A new |env| and new Devices will be created.
    Forecasting can either be ran synchronous or asynchronous (threaded)::
        foocast = Forecast(time.time(), forward=10*24*3600)
        barcast = Forecast(time.time(), forward=2*24*3600)
        #run threaded
        barcast.start()
        #wait until foocast is finished, then get result
        resultfoo = foocast.run().get()
        # wait until barcast is finished
        while resultbar == None:
            resultbar = barcast.get()
    :param int initial_time: timestamp of the time, at which the forecast starts
    :param configurations: cached configurations, if ``None``, retrieve from database
    :param code: code to be executed
    :param int forward: Time to forecast. Uses `DEFAULT_FORECAST_INTERVAL` if ``None``
    :param boolean forecast: Passed to |env| forecast.
    """
    def __init__(self, initial_time, configurations=None, code=None, forward=None, forecast=True):
        Thread.__init__(self)
        # Daemon thread: don't block interpreter shutdown on a running forecast.
        self.daemon = True
        demomode = Configuration.objects.get(key='system_mode').value == "demo"
        self.env = BaseEnvironment(initial_time=initial_time, forecast=forecast,
                                   step_size=DEFAULT_FORECAST_STEP_SIZE,demomode=demomode) #get_forecast
        if configurations is None:
            configurations = DeviceConfiguration.objects.all()
        self.devices = get_initialized_scenario(self.env, configurations)
        self.measurements = MeasurementStorage(self.env, self.devices)
        self.user_function = get_user_function(self.devices, code)
        # progress is a percentage in [0, 100]; result stays None until done.
        self.progress = 0.0
        self.result = None
        self.forward = forward
        if forward == None:
            self.forward = DEFAULT_FORECAST_INTERVAL
        self.next_optimization = 0.0
        self.use_optimization = get_configuration('auto_optimization')
    def step(self):
        """ execute one step of the simulation.
        This steps all devices, auto-optimizes if needed and store the values
        """
        execute_user_function(self.env,self.env.forecast,self.devices,self.user_function)
        # Re-optimize at most once per simulated hour.
        if self.use_optimization and self.next_optimization <= 0.0:
            auto_optimize(self)
            self.next_optimization = 3600.0
        # call step function for all devices
        for device in self.devices:
            device.step()
        self.store_values()
        self.env.now += self.env.step_size
        self.next_optimization -= self.env.step_size
    def run(self):
        """ run the main loop. Returns self after finishing.
        Results are obtained with :meth:`get`"""
        time_remaining = self.forward
        while time_remaining > 0:
            self.step()
            self.progress = (1.0 - time_remaining/float(self.forward)) * 100
            time_remaining -= self.env.step_size
        self.result = {
            'start': datetime.fromtimestamp(self.env.initial_date).isoformat(),
            'step': DEFAULT_FORECAST_STEP_SIZE,
            'end': datetime.fromtimestamp(self.env.now).isoformat(),
            'sensors': self.measurements.get_cached()
        }
        return self
    def store_values(self):
        """ sample device values"""
        self.measurements.take_and_cache()
    def get(self):
        """ return the result of the forecast.
        If the mainloop is still forecasting, ``None`` is returned.
        outputs a dict with::
            result = {start: datetime,
                      step: stepsize,
                      end: datetime,
                      sensors: list with values per sensor (see MeasurementStorage)}
        """
        return self.result
class DemoSimulation(Forecast):
    """ A Forecast, which writes the values to the database.
    It replaces the real devices and is used to develop and show the capabilities of ecoControl.
    It uses real electrical and weather values instead of forecasts,
    the device simulation on the other hand is the same as in :class:`Forecast`.
    After calling start(), the simulation will currently run at 30 steps per second (or 30x speed).
    This is controlled by the `step_size` in |env|.
    The simulation can be forwarded to a certain point by setting the `forward` variable in seconds > 0.
    It will then run at maximum speed. The simulation runs until the variable `running` is set to False.
    .. note:: DemoSimulations should generally be started with :meth:`start_or_get`
    """
    # Class-level singleton holder; see start_or_get().
    stored_simulation = None
    def __init__(self, initial_time, configurations=None):
        Forecast.__init__(self, initial_time, configurations, forward=0, forecast=False)
        self.steps_per_second = 3600.0 / self.env.step_size
        self.running = False
    @classmethod
    def start_or_get(cls, print_visible=False):
        """
        This method starts a new demo simulation
        if necessary and it makes sure that only
        one demo simulation can run at once.
        This is the preferred way to start the demo simulation.
        :returns: :class:`DemoSimulation` or ``None`` if system not in demo mode.
        """
        # Start demo simulation if in demo mode
        system_mode = Configuration.objects.get(key='system_mode')
        if system_mode.value != 'demo':
            return None
        if cls.stored_simulation == None:
            if print_visible:
                # NOTE(review): python-2 print statement — this module
                # targets python 2.
                print "Starting demo simulation..."
            else:
                logger.debug("Starting demo simulation...")
            simulation = DemoSimulation(get_initial_time())
            simulation.use_optimization = get_configuration('auto_optimization')
            simulation.start()
            cls.stored_simulation = simulation
        return cls.stored_simulation
    def run(self):
        """ run while `running` is true, call the parent :meth:`step` method.
        This method must be called by :meth:`start`, otherwise it immediately returns"""
        while self.running:
            self.step()
            if self.forward > 0:
                # Fast-forward mode: no sleeping until `forward` is used up.
                self.forward -= self.env.step_size
            else:
                time.sleep(1.0 / self.steps_per_second)
    def store_values(self):
        """stores values in database. Overwrites parents saving method.
        Values are only stored every (simulated) minute"""
        if self.env.now % 60 != 0:
            return
        self.measurements.take_and_save()
    def start(self):
        "start the simulation in a seperate thread"
        self.running = True
        Thread.start(self)
def get_initial_time():
    """Return the UTC epoch timestamp of the newest |SensorValue| in the db.

    Falls back to a fixed date when the database holds no values yet."""
    try:
        newest = SensorValue.objects.latest('timestamp')
    except SensorValue.DoesNotExist:
        return 1356998400  # Tuesday 1st January 2013 12:00:00
    return calendar.timegm(newest.timestamp.timetuple())
| |
'''
Data augmentation services supplied with Zen
'''
import re
from amara.lib import U
from amara.lib.date import timezone, UTC
from amara.thirdparty import json
try:
from akara import logger
except ImportError:
logger = None
from zen.services import register_service, zservice
from zen.temporal import smart_parse_date
from zen.akamod import geolookup_service
#from zen.geo import local_geonames
import time; from functools import partial; isobase = partial(time.strftime, "%Y-%m-%dT%H:%M:%S")
#def UU(obj, k): return U(obj[k]) if k in obj and obj[k] is not None and U(k).strip() else u''
def UU(obj, k):
    """Unicode-safe field access: U() of obj[k], stripped; u'' when the key
    is absent or its value is None."""
    raw = U(obj.get(k), noneok=True)
    if raw is None:
        return u''
    return raw.strip()
GEOCODER = None
@zservice(u'http://purl.org/com/zepheira/augmentation/location')
def augment_location(source, propertyinfo, augmented, failed):
    '''
    Sample propertyinfo

    {
        "property": "latlong",
        "enabled": True,
        "label": "Mapped place",
        "tags": ["property:type=location"],
        "composite": [
            "street_address",
            "city",
            "state",
            "zip"
        ]
    }

    A few composite examples

    >>> from zen import augmentation
    >>> from zen.geo import local_geonames
    >>> augmentation.GEOCODER = local_geonames('/Users/uche/.local/lib/akara/geonames.sqlite3')
    >>> augmentation.GEOCODER('Superior, CO')

    >>> source = [{u"id": u"_1", u"label": u"_1", u"orig": u"text, text, text"}]
    >>> propinfo = {u"enabled": True, u"property": u"latlong", u"enabled": True, u"label": "mapped result", u"tags": [u"property:type=location"], u"composite": ["place1", "place2"]}
    >>> result = []
    >>> failed = {}
    >>> augmentation.augment_location(source, propinfo, result, failed)
    >>> result
    [{u'shredded': [u'text', u'text', u'text'], u'id': u'_1', u'label': u'_1'}]

    A few non-composite examples

    >>> source = [{u"id": u"_1", u"label": u"_1", u"placename": u"Georgia"}]
    >>> propinfo = {u"enabled": True, u"property": u"latlong", u"enabled": True, u"label": "mapped result", u"tags": [u"property:type=location"], u"composite": ["placename"]}
    >>> result = []
    >>> failed = {}
    >>> augmentation.augment_location(source, propinfo, result, failed)
    >>> result
    [{u'latlong': '{"Georgia": "42,43.5"}', u'id': u'_1', u'label': u'_1'}]
    '''
    #In the above "Georgia" example, if you wanted the US state instead (83.50,32.71)
    #You need to specify heuristics for the geocoder
    #It is possible for us to get passed in a data profile which includes a property of type location which is not meant to be augmented.
    #In that case there will be no composite param
    if not u"composite" in propertyinfo:
        return
    composite = propertyinfo[u"composite"]
    # Name of the output property holding the computed lat/long.
    pname = propertyinfo.get(u"property", u'location_latlong')
    def each_obj(obj, id):
        # Join the record's composite fields into one "a, b, c" address string.
        address_parts = [ UU(obj, k) for k in composite ]
        if not any(address_parts):
            failed.setdefault(pname, []).append({u'id': id, u'label': obj[u'label'], 'input': address_parts, 'reason': u'No address information found'})
            return
        location = u', '.join(address_parts)
        if logger: logger.debug("location input: " + repr(location))
        if GEOCODER:
            # Local geocoder hook: take the first hit's lat/long, if any.
            result = GEOCODER(location)
            location_latlong = result.values()[0] if result else ""
        else:
            #Use an HTTP server for the geoname
            # NOTE(review): `geolookup` is not among this module's visible
            # imports (only geolookup_service is) -- confirm it is bound at
            # module scope elsewhere in this file.
            location_latlong = geolookup(location)
        if location_latlong:
            augmented.append({u'id': id, u'label': obj[u'label'],
                              pname: location_latlong})
        else:
            failed.setdefault(pname, []).append({u'id': id, u'label': obj[u'label'], 'input': address_parts, 'reason': u'No geolocation possible for address'})
    # Shared driver: iterates source and records per-record exceptions.
    augment_wrapper(source, pname, failed, each_obj, 'augment_location')
    return
def augment_wrapper(source, pname, failed, func, opname):
for obj in source:
try:
id = obj[u'id']
func(obj, id)
except (KeyboardInterrupt, SystemExit):
raise
except Exception, e:
if logger: logger.info('Exception in %s: '%opname + repr(e))
failed.setdefault(pname, []).append({u'id': id, u'label': obj[u'label'], 'input': '(masked by exception)', 'reason': repr(e)})
# len("YYYY-MM-DDTHH:MM:SS"): length of an ISO 8601 timestamp without zone info.
LEN_BASE_ISOFORMAT = 19
@zservice(u'http://purl.org/com/zepheira/augmentation/datetime')
def augment_date(source, propertyinfo, augmented, failed):
    '''
    Sample propertyinfo

    {
        "property": "start_date",
        "enabled": true,
        "label": "Start date",
        "tags": ["property:type=date"],
        "composite": [
            "start"
        ]
    }

    >>> from zen import augmentation
    >>> source = [{u"id": u"_1", u"label": u"_1", u"end": u"2011-01-01"}]
    >>> propinfo = {u"enabled": True, u"property": u"iso_end_date", u"enabled": True, u"label": "ISO end date", u"tags": [u"property:type=date"], "composite": ["end"]}
    >>> result = []
    >>> failed = {}
    >>> augmentation.augment_date(source, propinfo, result, failed)
    >>> result
    [{u'iso_end_date': '2011-01-01T00:00:00+0000', u'id': u'_1', u'label': u'_1'}]
    >>> failed
    {}

    [{u'shredded': [u'text', u'text', u'text'], u'id': u'_1', u'label': u'_1'}]
    '''
    #It is possible for us to get passed in a data profile which includes a property of type datewhich is not meant to be augmented.
    #In that case there will be no composite param
    if not u"composite" in propertyinfo:
        return
    composite = propertyinfo[u"composite"]
    # Name of the output property holding the normalized ISO 8601 value.
    pname = propertyinfo.get(u"property", u'iso_datetime')
    def each_obj(obj, id):
        #Excel will sometimes give us dates as integers, which reflects in the data set coming back.
        #Hence the extra unicode conv.
        #FIXME: should fix in freemix.json endpoint and remove from here
        date_parts = [ unicode(obj[k]) for k in composite if unicode(obj.get(k, u'')).strip() ]
        if not any(date_parts):
            failed.setdefault(pname, []).append({u'id': id, u'label': obj[u'label'], 'input': date_parts, 'reason': u'No date information found'})
            return
        date = u', '.join(date_parts)
        if logger: logger.debug("date input: " + repr(date))
        #FIXME: Think clearly about timezone here. Consider defaults to come from user profile
        clean_date = smart_parse_date(date)
        if clean_date:
            try:
                # Normal path: "YYYY-MM-DDTHH:MM:SS" plus the UTC zone suffix.
                augmented.append({u'id': id, u'label': obj[u'label'],
                                  pname: isobase(clean_date.utctimetuple()) + UTC.name})
            except ValueError:
                #strftime cannot handle dates prior to 1900. See: http://docs.python.org/library/datetime.html#strftime-and-strptime-behavior
                augmented.append({u'id': id, u'label': obj[u'label'],
                                  pname: clean_date.isoformat()[:LEN_BASE_ISOFORMAT] + UTC.name})
        else:
            failed.setdefault(pname, []).append({u'id': id, u'label': obj[u'label'], 'input': date_parts, 'reason': u'Unable to parse date'})
    # Shared driver: iterates source and records per-record exceptions.
    augment_wrapper(source, pname, failed, each_obj, 'augment_date')
    #if logger: logger.info('Exception in augment_date: ' + repr(e))
    return
@zservice(u'http://purl.org/com/zepheira/augmentation/luckygoogle')
def augment_luckygoogle(source, propertyinfo, augmented, failed):
    '''
    Augment each record with a "lucky Google" link computed from the fields
    named in propertyinfo[u"composite"].

    Successful lookups are appended to `augmented`; records that yield no
    link or that raise are recorded in `failed`, keyed by the target
    property name -- matching the contract of the sibling augmentation
    services (augment_location, augment_date).
    '''
    #It is possible for us to get passed in a data profile which includes a
    #property of type luckygoogle which is not meant to be augmented.
    #In that case there will be no composite param
    if not u"composite" in propertyinfo:
        return
    composite = propertyinfo[u"composite"]
    pname = propertyinfo.get(u"property", u'luckygoogle')
    def each_obj(obj, id):
        #Excel will sometimes give us dates as integers, which reflects in the data set coming back.
        #Hence the extra unicode conv.
        #FIXME: should fix in freemix.json endpoint and remove from here
        item = u', '.join([ unicode(obj[k]) for k in composite if unicode(obj.get(k, u'')).strip() ])
        # NOTE(review): `luckygoogle` is not among this module's visible
        # imports -- confirm it is bound at module scope elsewhere.
        link = luckygoogle(item)
        if link:
            augmented.append({u'id': id, u'label': obj[u'label'], pname: link})
        else:
            failed.setdefault(pname, []).append({u'id': id, u'label': obj[u'label'], 'input': item, 'reason': u'No link found'})
    # Fixes the previous implementation, which wrote results/failures into
    # undefined `items_dict`/`failure_dict` names (so every record raised
    # NameError) and logged its exceptions as coming from augment_date.
    # Routing through augment_wrapper also matches the sibling services.
    augment_wrapper(source, pname, failed, each_obj, 'augment_luckygoogle')
    return
@zservice(u'http://purl.org/com/zepheira/augmentation/shredded-list')
def augment_shredded_list(source, propertyinfo, augmented, failed):
    '''
    See: http://community.zepheira.com/wiki/loc/ValidPatternsList

    >>> from zen import augmentation
    >>> source = [{u"id": u"_1", u"label": u"_1", u"orig": u"text, text, text"}]
    >>> propinfo = {u"delimiter": u",", u"extract": u"orig", u"property": u"shredded", u"enabled": True, u"label": "shredded result", u"tags": [u"property:type=text"]}
    >>> result = []
    >>> failed = []
    >>> augmentation.augment_shredded_list(source, propinfo, result, failed)
    >>> result
    [{u'shredded': [u'text', u'text', u'text'], u'id': u'_1', u'label': u'_1'}]

    >>> source = [{u"id": u"_1", u"label": u"_1", u"orig": u"text, text and text"}]
    >>> propinfo = {u"pattern": u"(,)|(and)", u"extract": u"orig", u"property": u"shredded", u"enabled": True, u"label": "shredded result", u"tags": [u"property:type=text"]}
    >>> result = []
    >>> failed = []
    >>> augmentation.augment_shredded_list(source, propinfo, result, failed)
    >>> result
    [{u'shredded': [u'text', u'text', u'text'], u'id': u'_1', u'label': u'_1'}]
    '''
    #It is possible for us to get passed in a data profile which includes a property of type shredded_list which is not meant to be augmented.
    #In that case there will be no extract param
    if not u"extract" in propertyinfo:
        return
    extract = propertyinfo[u"extract"]
    pname = propertyinfo.get(u"property", u'shreddedlist')
    # Either a regex pattern or a plain delimiter drives the split; the
    # pattern takes precedence when both are supplied.
    pattern = propertyinfo.get(u"pattern")
    if pattern: pattern = re.compile(pattern)
    delim = propertyinfo.get(u"delimiter", u',')
    def each_obj(obj, id):
        if pattern:
            # Split on every regex match; note the +1 skips one character
            # after each match (see FIXME below).
            text = obj[extract]
            start = 0
            result = []
            #FIXME: Needs to be better spec'ed
            for m in pattern.finditer(text):
                result.append(text[start: m.start()].strip())
                start = m.end() + 1
            result.append(text[start:].strip())
        else:
            result = [ item.strip() for item in obj[extract].split(delim) ]
        if logger: logger.debug("augment_shredded_list: " + repr((obj[extract], pattern, delim)))
        if logger: logger.debug("result: " + repr(result))
        if result:
            augmented.append({u'id': id, u'label': obj[u'label'],
                              pname: result})
    # Shared driver: iterates source and records per-record exceptions.
    augment_wrapper(source, pname, failed, each_obj, 'augment_shredded_list')
    return
# ---------------------------------------------------------------------------
#!/usr/bin/env python
"""This defines some tests for real world clients to be run from the console."""
import os
import re
import socket
import threading
import unittest
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import flow_utils
from grr.lib import maintenance_utils
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import test_lib
from grr.lib import utils
from grr.lib.flows.console import debugging
from grr.lib.rdfvalues import crypto
def TestFlows(client_id, platform, testname=None, local_worker=False):
  """Run the client test suite against one client on the given platform."""
  supported = ("windows", "linux", "darwin")
  if platform not in supported:
    raise RuntimeError("Requested operating system not supported.")

  # This token is not really used since there is no approval for the
  # tested client - these tests are designed for raw access - but we send it
  # anyways to have an access reason.
  token = access_control.ACLToken(username="test", reason="client testing")

  RunTests(rdfvalue.RDFURN(client_id), platform=platform, testname=testname,
           token=token, local_worker=local_worker)
def RecursiveListChildren(prefix=None, token=None):
  """Return the set of every URN reachable below `prefix` (breadth-first).

  The prefix itself is not included in the result.
  """
  collected = set()
  frontier = set([prefix])

  while frontier:
    discovered = set()
    for _, children in aff4.FACTORY.MultiListChildren(frontier, token=token):
      discovered.update(children)
    collected |= discovered
    frontier = discovered
  return collected
class ClientTestBase(test_lib.GRRBaseTest):
  """This is the base class for all client tests.

  Subclasses declare the flow to run (`flow`), its arguments (`args`) and
  the platforms they apply to; `runTest` launches the flow against a real
  client and then calls `CheckFlow` to verify the results.
  """
  platforms = []  # Client OSes this test applies to, e.g. ["linux"].
  flow = None  # Name of the flow to launch.
  args = {}  # Keyword arguments passed to the flow.
  cpu_limit = None  # Optional client CPU limit for the flow.
  network_bytes_limit = None  # Optional network byte limit for the flow.

  # Registers every subclass so RunTests can discover tests via `.classes`.
  __metaclass__ = registry.MetaclassRegistry

  def __init__(self, client_id=None, platform=None, local_worker=False,
               token=None):
    # If we get passed a string, turn it into a urn.
    self.client_id = rdfvalue.RDFURN(client_id)
    self.platform = platform
    self.token = token
    self.local_worker = local_worker
    super(ClientTestBase, self).__init__(methodName="runTest")

  def setUp(self):
    # Disable setUp since the cleanup between unit tests does not make sense
    # here.
    pass

  def tearDown(self):
    # Disable tearDown since the cleanup between unit tests does not make sense
    # here.
    pass

  def runTest(self):
    # Run the flow either through an in-process (local) worker or by
    # scheduling it and waiting on the remote workers, then verify results.
    if self.local_worker:
      self.session_id = debugging.StartFlowAndWorker(
          self.client_id, self.flow, cpu_limit=self.cpu_limit,
          network_bytes_limit=self.network_bytes_limit, **self.args)
    else:
      self.session_id = flow_utils.StartFlowAndWait(
          self.client_id, flow_name=self.flow, cpu_limit=self.cpu_limit,
          network_bytes_limit=self.network_bytes_limit, token=self.token,
          **self.args)
    self.CheckFlow()

  def CheckFlow(self):
    # Overridden by subclasses to assert on the flow's results.
    pass

  def DeleteUrn(self, urn):
    """Deletes an object from the db and the index, and flushes the caches."""
    data_store.DB.DeleteSubject(urn, token=self.token)
    # NOTE(review): relies on a private aff4 index helper -- confirm there is
    # no public equivalent before changing this.
    aff4.FACTORY._DeleteChildFromIndex(urn, token=self.token)
    aff4.FACTORY.Flush()

  def GetGRRBinaryName(self, run_interrogate=True):
    """Return the client binary name, interrogating once if it is unknown."""
    client = aff4.FACTORY.Open(self.client_id, mode="r", token=self.token)
    self.assertIsInstance(client, aff4.VFSGRRClient)
    config = client.Get(aff4.VFSGRRClient.SchemaCls.GRR_CONFIGURATION)

    if config is None:
      # Try running Interrogate once.
      if run_interrogate:
        flow_utils.StartFlowAndWait(self.client_id,
                                    flow_name="Interrogate", token=self.token)
        return self.GetGRRBinaryName(run_interrogate=False)
      else:
        self.fail("No valid configuration found, interrogate the client before "
                  "running this test.")
    else:
      self.binary_name = config["Client.binary_name"]
      return self.binary_name
class LocalClientTest(ClientTestBase):
  """Base class for tests that require a local (debug-only) worker."""

  def runTest(self):
    # Debug-only flows cannot be scheduled on remote workers.
    if self.local_worker:
      super(LocalClientTest, self).runTest()
      return
    print ("This test uses a flow that is debug only. Use a local worker"
           " to run this test.")
class TestGetFileTSKLinux(ClientTestBase):
  """Tests if GetFile works on Linux using Sleuthkit."""
  platforms = ["linux"]
  flow = "GetFile"
  args = {"pathspec": rdfvalue.PathSpec(
      path="/bin/ls",
      pathtype=rdfvalue.PathSpec.PathType.TSK)}

  # Interpolate for /dev/mapper-...
  # TSK paths embed the raw device component, so the expected output path is
  # a regex rather than a literal AFF4 path.
  output_path = "/fs/tsk/.*/bin/ls"

  def CheckFlow(self):
    """Locate the fetched file in AFF4 and verify its contents."""
    pos = self.output_path.find("*")
    if pos > 0:
      # Regex path: walk everything below the literal prefix and match.
      prefix = self.client_id.Add(self.output_path[:pos])
      for urn in RecursiveListChildren(prefix=prefix):
        if re.search(self.output_path + "$", str(urn)):
          self.to_delete = urn
          return self.CheckFile(aff4.FACTORY.Open(urn))
    else:
      # Literal path: open it directly.
      urn = self.client_id.Add(self.output_path)
      fd = aff4.FACTORY.Open(urn)
      if isinstance(fd, aff4.BlobImage):
        return self.CheckFile(fd)
    self.fail("Output file not found.")

  def CheckFile(self, fd):
    # /bin/ls must be an ELF binary (bytes 1-3 of the magic are "ELF").
    data = fd.Read(10)
    self.assertEqual(data[1:4], "ELF")

  def tearDown(self):
    super(TestGetFileTSKLinux, self).tearDown()
    # Delete whatever CheckFlow found (or the literal output path) and prove
    # the deletion worked by expecting CheckFlow to fail afterwards.
    if hasattr(self, "to_delete"):
      urn = self.to_delete
    else:
      urn = self.client_id.Add(self.output_path)
    self.DeleteUrn(urn)
    # Make sure the deletion acutally worked.
    self.assertRaises(AssertionError, self.CheckFlow)
class TestGetFileTSKMac(TestGetFileTSKLinux):
  """Tests if GetFile works on Mac using Sleuthkit."""
  platforms = ["darwin"]

  def CheckFile(self, fd):
    # Mac fat binaries start with the 0xcafebabe magic number.
    header = fd.Read(10)
    self.assertEqual(header[:4], "\xca\xfe\xba\xbe")
class TestGetFileOSLinux(TestGetFileTSKLinux):
  """Tests if GetFile works on Linux."""
  # Same check as the TSK variant, but reads /bin/ls through the normal OS
  # filesystem pathtype, so the output path is a literal AFF4 path.
  args = {"pathspec": rdfvalue.PathSpec(
      path="/bin/ls",
      pathtype=rdfvalue.PathSpec.PathType.OS)}
  output_path = "/fs/os/bin/ls"
class TestSendFile(ClientTestBase):
  """Sends /bin/ls AES-encrypted to a local listener and checks the plaintext."""
  platforms = ["linux"]
  flow = "SendFile"
  # Fixed key/IV so the test can decrypt what the client sends.
  key = rdfvalue.AES128Key("1a5eafcc77d428863d4c2441ea26e5a5")
  iv = rdfvalue.AES128Key("2241b14c64874b1898dad4de7173d8c0")

  args = dict(host="127.0.0.1",
              port=12345,
              pathspec=rdfvalue.PathSpec(pathtype=0, path="/bin/ls"),
              key=key,
              iv=iv)

  def setUp(self):
    logging.info("This test only works if the client is running on localhost!!")

    class Listener(threading.Thread):
      """Background TCP server that buffers everything the client sends."""
      result = []
      daemon = True

      def run(self):
        # Bind the first usable IPv4 stream address on port 12345.
        for res in socket.getaddrinfo(
            None, 12345, socket.AF_INET,
            socket.SOCK_STREAM, 0, socket.AI_ADDRCONFIG):
          af, socktype, proto, _, sa = res
          try:
            s = socket.socket(af, socktype, proto)
          except socket.error:
            s = None
            continue
          try:
            s.bind(sa)
            s.listen(1)
          except socket.error:
            s.close()
            s = None
            continue
          break
        # Accept a single connection and collect its payload until EOF.
        conn, _ = s.accept()
        while 1:
          data = conn.recv(1024)
          if not data: break
          self.result.append(data)
        conn.close()

    self.listener = Listener()
    self.listener.start()

  def CheckFlow(self):
    # Decrypt the captured ciphertext and compare with the original file.
    original_data = open("/bin/ls", "rb").read()
    received_cipher = "".join(self.listener.result)
    cipher = crypto.AES128CBCCipher(key=self.key, iv=self.iv,
                                    mode=crypto.AES128CBCCipher.OP_DECRYPT)
    received_data = cipher.Update(received_cipher) + cipher.Final()
    self.assertEqual(received_data, original_data)
class TestListDirectoryOSLinux(ClientTestBase):
  """Tests if ListDirectory works on Linux."""
  platforms = ["linux", "darwin"]
  flow = "ListDirectory"
  args = {"pathspec": rdfvalue.PathSpec(
      path="/bin",
      pathtype=rdfvalue.PathSpec.PathType.OS)}
  output_path = "/fs/os/bin"
  file_to_find = "ls"

  def CheckFlow(self):
    """Verify the listed directory contains `file_to_find` in AFF4."""
    pos = self.output_path.find("*")
    urn = None
    if pos > 0:
      # Wildcard path (used by the TSK subclass): search below the prefix.
      base_urn = self.client_id.Add(self.output_path[:pos])
      for urn in RecursiveListChildren(prefix=base_urn):
        if re.search(self.output_path + "$", str(urn)):
          self.to_delete = urn
          break
      self.assertNotEqual(urn, None, "Could not locate Directory.")
    else:
      urn = self.client_id.Add(self.output_path)

    fd = aff4.FACTORY.Open(urn.Add(self.file_to_find),
                           mode="r", token=self.token)
    # Exact type check: the entry must have been collected as a file.
    self.assertEqual(type(fd), aff4.VFSFile)

  def tearDown(self):
    super(TestListDirectoryOSLinux, self).tearDown()
    if hasattr(self, "to_delete"):
      urn = self.to_delete
    else:
      urn = self.client_id.Add(self.output_path)
    # Delete both the file entry and the directory itself.
    self.DeleteUrn(urn.Add(self.file_to_find))
    self.DeleteUrn(urn)
    # Make sure the deletion acutally worked.
    self.assertRaises(AssertionError, self.CheckFlow)
class TestListDirectoryTSKLinux(TestListDirectoryOSLinux):
  """Tests if ListDirectory works on Linux using Sleuthkit."""
  # Same checks as the OS variant; the device component in TSK paths makes
  # the expected output path a regex.
  args = {"pathspec": rdfvalue.PathSpec(
      path="/bin",
      pathtype=rdfvalue.PathSpec.PathType.TSK)}
  output_path = "/fs/tsk/.*/bin"
class TestFindTSKLinux(TestListDirectoryTSKLinux):
  """Tests if the find flow works on Linux using Sleuthkit."""
  flow = "FindFiles"
  # Match everything under /bin; result checking is inherited.
  args = {"findspec": rdfvalue.FindSpec(
      path_regex=".",
      pathspec=rdfvalue.PathSpec(
          path="/bin/",
          pathtype=rdfvalue.PathSpec.PathType.TSK))}
class TestFindOSLinux(TestListDirectoryOSLinux):
  """Tests if the find flow works on Linux."""
  flow = "FindFiles"
  # Match everything under /bin; result checking is inherited.
  args = {"findspec": rdfvalue.FindSpec(
      path_regex=".",
      pathspec=rdfvalue.PathSpec(
          path="/bin/",
          pathtype=rdfvalue.PathSpec.PathType.OS))}
class TestClientInterrogateEndToEnd(ClientTestBase):
  """Tests the Interrogate flow end to end on all supported platforms."""
  platforms = ["windows", "linux", "darwin"]
  flow = "Interrogate"
  # Attributes Interrogate is expected to (re)populate on the client object.
  attributes = [aff4.VFSGRRClient.SchemaCls.GRR_CONFIGURATION,
                aff4.VFSGRRClient.SchemaCls.MAC_ADDRESS,
                aff4.VFSGRRClient.SchemaCls.HOSTNAME,
                aff4.VFSGRRClient.SchemaCls.INSTALL_DATE,
                aff4.VFSGRRClient.SchemaCls.CLIENT_INFO,
                aff4.VFSGRRClient.SchemaCls.OS_RELEASE,
                aff4.VFSGRRClient.SchemaCls.OS_VERSION,
                aff4.VFSGRRClient.SchemaCls.USERNAMES]

  def setUp(self):
    super(TestClientInterrogateEndToEnd, self).setUp()
    # Wipe the attributes first so the flow has to repopulate them, and
    # confirm that the wipe took effect.
    data_store.DB.DeleteAttributes(self.client_id, [
        str(attribute) for attribute in self.attributes], sync=True)
    aff4.FACTORY.Flush()
    self.assertRaises(AssertionError, self.CheckFlow)

  def CheckFlow(self):
    fd = aff4.FACTORY.Open(self.client_id, mode="r", token=self.token)
    self.assertIsInstance(fd, aff4.VFSGRRClient)
    for attribute in self.attributes:
      value = fd.Get(attribute)
      self.assertTrue(value is not None, "Attribute %s is None." % attribute)
      self.assertTrue(str(value))
class TestListDirectoryOSWindows(TestListDirectoryOSLinux):
  """Tests if ListDirectory works on Windows."""
  platforms = ["windows"]
  args = {"pathspec": rdfvalue.PathSpec(
      path="C:\\Windows",
      pathtype=rdfvalue.PathSpec.PathType.OS)}
  file_to_find = "regedit.exe"
  output_path = "/fs/os/C:/Windows"
class TestListDirectoryTSKWindows(TestListDirectoryTSKLinux):
  """Tests if ListDirectory works on Windows using Sleuthkit."""
  platforms = ["windows"]
  args = {"pathspec": rdfvalue.PathSpec(
      path="C:\\Windows",
      pathtype=rdfvalue.PathSpec.PathType.TSK)}
  file_to_find = "regedit.exe"

  def CheckFlow(self):
    """Search every TSK volume for <windir>/regedit.exe."""
    found = False
    # XP has uppercase...
    for windir in ["Windows", "WINDOWS"]:
      urn = self.client_id.Add("/fs/tsk")
      fd = aff4.FACTORY.Open(urn, mode="r", token=self.token)
      volumes = list(fd.OpenChildren())
      for volume in volumes:
        fd = aff4.FACTORY.Open(volume.urn.Add(windir), mode="r",
                               token=self.token)
        children = list(fd.OpenChildren())
        for child in children:
          if self.file_to_find == child.urn.Basename():
            # We found what we were looking for.
            found = True
            self.to_delete = child.urn
            break
    self.assertTrue(found)
class TestRecursiveListDirectoryOSWindows(TestListDirectoryOSWindows):
  """Recursively lists C:\\ (depth 1) and checks for regedit.exe."""
  flow = "RecursiveListDirectory"
  args = {"pathspec": rdfvalue.PathSpec(
      path="C:\\",
      pathtype=rdfvalue.PathSpec.PathType.OS),
          "max_depth": 1}
  file_to_find = "regedit.exe"
  output_path = "/fs/os/C:/Windows"
class TestFindWindowsRegistry(ClientTestBase):
  """Test that user listing from the registry works.

  We basically list the registry and then run Find on the same place, we expect
  a single ProfileImagePath value for each user.
  """
  platforms = ["windows"]
  reg_path = ("/HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Windows NT/"
              "CurrentVersion/ProfileList/")

  output_path = "analysis/find/test"

  def runTest(self):
    """Launch our flows."""
    # First list the ProfileList key, then run FindFiles over it, writing
    # hits into output_path.  CheckFlow needs both results, so it runs after
    # both flows have completed.
    for flow, args in [
        ("ListDirectory", {"pathspec": rdfvalue.PathSpec(
            pathtype=rdfvalue.PathSpec.PathType.REGISTRY,
            path=self.reg_path)}),
        ("FindFiles", {"findspec": rdfvalue.FindSpec(
            pathspec=rdfvalue.PathSpec(
                path=self.reg_path,
                pathtype=rdfvalue.PathSpec.PathType.REGISTRY),
            path_regex="ProfileImagePath"),
                       "output": self.output_path})]:

      if self.local_worker:
        self.session_id = debugging.StartFlowAndWorker(
            self.client_id, flow, cpu_limit=self.cpu_limit,
            network_bytes_limit=self.network_bytes_limit, **args)
      else:
        self.session_id = flow_utils.StartFlowAndWait(
            self.client_id, flow_name=flow, cpu_limit=self.cpu_limit,
            network_bytes_limit=self.network_bytes_limit, token=self.token,
            **args)

    self.CheckFlow()

  def CheckFlow(self):
    """Check that all profiles listed have an ProfileImagePath."""
    urn = self.client_id.Add("registry").Add(self.reg_path)
    fd = aff4.FACTORY.Open(urn, mode="r", token=self.token)
    # Per-user profile keys are named by SID ("S-...").
    user_accounts = sorted([x.urn for x in fd.OpenChildren()
                            if x.urn.Basename().startswith("S-")])

    urn = self.client_id.Add(self.output_path)
    fd = aff4.FACTORY.Open(urn, token=self.token)
    hits = sorted([x.aff4path for x in fd])

    self.assertGreater(len(hits), 1)
    self.assertEqual(len(hits), len(user_accounts))

    # Every SID key must have produced exactly one ProfileImagePath hit.
    for x, y in zip(user_accounts, hits):
      self.assertEqual(x.Add("ProfileImagePath"), y)
class TestGetFileOSWindows(TestGetFileOSLinux):
  """Tests if GetFile works on Windows."""
  platforms = ["windows"]
  args = {"pathspec": rdfvalue.PathSpec(
      path="C:\\Windows\\regedit.exe",
      pathtype=rdfvalue.PathSpec.PathType.OS)}
  output_path = "/fs/os/C:/Windows/regedit.exe"

  def CheckFile(self, fd):
    # Windows executables begin with the DOS "MZ" magic.
    header = fd.Read(10)
    self.assertEqual(header[:2], "MZ")
class TestGetFileTSKWindows(TestGetFileOSWindows):
  """Tests if GetFile works on Windows using TSK."""
  args = {"pathspec": rdfvalue.PathSpec(
      path="C:\\Windows\\regedit.exe",
      pathtype=rdfvalue.PathSpec.PathType.TSK)}

  def CheckFlow(self):
    """Search every TSK volume for the fetched Windows/regedit.exe."""
    urn = self.client_id.Add("/fs/tsk")
    fd = aff4.FACTORY.Open(urn, mode="r", token=self.token)
    volumes = list(fd.OpenChildren())
    found = False
    for volume in volumes:
      file_urn = volume.urn.Add("Windows/regedit.exe")
      fd = aff4.FACTORY.Open(file_urn, mode="r",
                             token=self.token)
      try:
        # "MZ" magic confirms the file was actually collected.
        data = fd.Read(10)
        if data[:2] == "MZ":
          found = True
          self.to_delete = file_urn
          break
      except AttributeError:
        # If the file does not exist on this volume, Open returns a aff4volume
        # which does not have a Read method.
        pass
    self.assertTrue(found)
class TestClientRegistry(ClientTestBase):
  """Tests if listing registry keys works on Windows."""
  platforms = ["windows"]
  flow = "ListDirectory"

  args = {"pathspec": rdfvalue.PathSpec(
      path="HKEY_LOCAL_MACHINE",
      pathtype=rdfvalue.PathSpec.PathType.REGISTRY)}
  output_path = "/registry/HKEY_LOCAL_MACHINE"

  def CheckFlow(self):
    # HKLM must at least contain the SYSTEM hive.
    urn = self.client_id.Add(self.output_path)
    fd = aff4.FACTORY.Open(urn, mode="r", token=self.token)
    children = list(fd.OpenChildren())
    self.assertTrue("SYSTEM" in [os.path.basename(utils.SmartUnicode(child.urn))
                                 for child in children])

  def tearDown(self):
    # Remove the entries CheckFlow verified.
    urn = self.client_id.Add(self.output_path)
    data_store.DB.DeleteSubject(str(urn.Add("SYSTEM")), token=self.token)
    data_store.DB.DeleteSubject(str(urn), token=self.token)
def RunTests(client_id=None, platform=None, testname=None,
             token=None, local_worker=False):
  """Run every registered client test matching `platform` (and `testname`)."""
  runner = unittest.TextTestRunner()
  # `.classes` is populated by the MetaclassRegistry on ClientTestBase.
  for cls in ClientTestBase.classes.values():
    if testname is not None and testname != cls.__name__:
      continue

    # NOTE(review): issubclass is looked up on the aff4 module rather than
    # the builtin -- presumably a registry-aware helper; confirm before
    # changing.
    if not aff4.issubclass(cls, ClientTestBase):
      continue

    if platform in cls.platforms:
      print "Running %s." % cls.__name__
      try:
        runner.run(cls(client_id=client_id, platform=platform,
                       token=token, local_worker=local_worker))
      except Exception:  # pylint: disable=broad-except
        logging.exception("Failed to run test %s", cls)
class TestCPULimit(LocalClientTest):
  """Checks that the client aborts the flow once the CPU limit is hit."""
  platforms = ["linux", "windows", "darwin"]

  flow = "CPULimitTestFlow"
  cpu_limit = 7

  def CheckFlow(self):
    # Reopen the object to update the state.
    flow_obj = aff4.FACTORY.Open(self.session_id, token=self.token)
    backtrace = flow_obj.state.context.get("backtrace", "")

    if backtrace:
      # Older clients lack the BusyHang action needed to burn CPU.
      if "BusyHang not available" in backtrace:
        print "Client does not support this test."
      else:
        self.assertTrue("CPU limit exceeded." in backtrace)
    else:
      self.fail("Flow did not raise the proper error.")
class TestNetworkFlowLimit(ClientTestBase):
  """Checks that a GetFile transfer is aborted at the network byte limit."""
  platforms = ["linux", "darwin"]
  flow = "GetFile"
  network_bytes_limit = 500 * 1024
  args = {"pathspec": rdfvalue.PathSpec(path="/bin/bash",
                                        pathtype=rdfvalue.PathSpec.PathType.OS)}

  output_path = "/fs/os/bin/bash"

  def setUp(self):
    # Clear any previous output and confirm only a bare volume remains.
    self.urn = self.client_id.Add(self.output_path)
    self.DeleteUrn(self.urn)

    fd = aff4.FACTORY.Open(self.urn, mode="r", token=self.token)
    self.assertEqual(type(fd), aff4.AFF4Volume)

  def CheckFlow(self):
    # Reopen the object to update the state.
    flow_obj = aff4.FACTORY.Open(self.session_id, token=self.token)

    # Make sure we transferred approximately the right amount of data.
    self.assertAlmostEqual(flow_obj.state.context.network_bytes_sent,
                           self.network_bytes_limit, delta=30000)
    backtrace = flow_obj.state.context.get("backtrace", "")
    self.assertTrue("Network bytes limit exceeded." in backtrace)
class TestMultiGetFileNetworkLimitExceeded(LocalClientTest):
  """Checks NetworkLimitTestFlow aborts and leaves no file behind."""
  platforms = ["linux", "darwin"]
  flow = "NetworkLimitTestFlow"
  args = {}
  network_bytes_limit = 3 * 512 * 1024

  def CheckFlow(self):
    # Reopen the object to update the state.
    flow_obj = aff4.FACTORY.Open(self.session_id, token=self.token)
    backtrace = flow_obj.state.context.get("backtrace", "")
    self.assertTrue("Network bytes limit exceeded." in backtrace)

    # The aborted transfer must not have materialized the destination file.
    self.output_path = flow_obj.state.dest_path.path
    self.urn = self.client_id.Add(self.output_path)

    fd = aff4.FACTORY.Open(self.urn, mode="r", token=self.token)
    self.assertEqual(type(fd), aff4.AFF4Volume)
class TestMultiGetFile(LocalClientTest):
  """Checks that MultiGetFileTestFlow terminates cleanly."""
  platforms = ["linux", "darwin"]
  flow = "MultiGetFileTestFlow"
  args = {}

  def CheckFlow(self):
    # Reopen the object to update the state.
    flow_obj = aff4.FACTORY.Open(self.session_id, token=self.token)
    # Check flow completed normally, checking is done inside the flow
    self.assertEqual(
        flow_obj.state.context.state, rdfvalue.Flow.State.TERMINATED)
    self.assertFalse(flow_obj.state.context.get("backtrace", ""))
class TestProcessListing(ClientTestBase):
  """Checks that ListProcesses returns a plausible process list."""
  platforms = ["linux", "windows", "darwin"]

  flow = "ListProcesses"
  args = {"output": "analysis/ListProcesses/testing"}

  def setUp(self):
    super(TestProcessListing, self).setUp()
    # Start from a clean slate and prove the check fails without results.
    self.process_urn = self.client_id.Add(self.args["output"])
    self.DeleteUrn(self.process_urn)
    self.assertRaises(AssertionError, self.CheckFlow)

  def CheckFlow(self):
    procs = aff4.FACTORY.Open(self.process_urn, mode="r", token=self.token)

    self.assertIsInstance(procs, aff4.RDFValueCollection)
    process_list = list(procs)

    # Make sure there are at least some results.
    self.assertGreater(len(process_list), 5)

    # The GRR client binary itself must show up in the listing.
    expected_name = self.GetGRRBinaryName()
    for p in process_list:
      if expected_name in p.exe:
        return

    self.fail("Process listing does not contain %s." % expected_name)
class TestNetstat(ClientTestBase):
  """Checks that Netstat collects a plausible connection table."""
  platforms = ["linux", "windows", "darwin"]

  flow = "Netstat"

  def setUp(self):
    super(TestNetstat, self).setUp()
    # Start from a clean slate and prove the check fails without results.
    self.network_urn = self.client_id.Add("network")
    self.DeleteUrn(self.network_urn)
    self.assertRaises(AssertionError, self.CheckFlow)

  def CheckFlow(self):
    netstat = aff4.FACTORY.Open(self.network_urn, mode="r", token=self.token)
    self.assertIsInstance(netstat, aff4.Network)
    connections = netstat.Get(netstat.Schema.CONNECTIONS)
    self.assertGreater(len(connections), 5)

    # There should be at least two local IPs.
    # (Despite the name, num_ips holds the set of distinct local IPs.)
    num_ips = set([k.local_address.ip for k in connections])
    self.assertGreater(len(num_ips), 1)

    # There should be at least two different connection states.
    num_states = set([k.state for k in connections])
    # This is a known issue on CentOS so we just warn about it.
    if len(num_states) <= 1:
      logging.warning("Only received one distinct connection state!")
class TestGetClientStats(ClientTestBase):
  """Checks that GetClientStats reports sane resource statistics."""
  platforms = ["linux", "windows", "darwin"]

  flow = "GetClientStats"

  def setUp(self):
    super(TestGetClientStats, self).setUp()
    # Start from a clean slate and prove the check fails without results.
    self.stats_urn = self.client_id.Add("stats")
    self.DeleteUrn(self.stats_urn)
    self.assertRaises(AssertionError, self.CheckFlow)

  def CheckFlow(self):
    aff4.FACTORY.Flush()
    client_stats = aff4.FACTORY.Open(self.stats_urn, token=self.token)
    self.assertIsInstance(client_stats, aff4.ClientStats)

    stats = list(client_stats.Get(client_stats.Schema.STATS))
    self.assertGreater(len(stats), 0)
    entry = stats[0]
    # All basic counters must be populated with positive values.
    self.assertGreater(entry.RSS_size, 0)
    self.assertGreater(entry.VMS_size, 0)
    self.assertGreater(entry.boot_time, 0)
    self.assertGreater(entry.bytes_received, 0)
    self.assertGreater(entry.bytes_sent, 0)
    self.assertGreater(entry.memory_percent, 0)

    self.assertGreater(len(list(entry.cpu_samples)), 0)
    if not list(entry.io_samples):
      logging.warning("No IO samples received. This is ok if the tested"
                      " client is a mac.")
class FingerPrintTestBase(object):
  """Mixin overriding a GetFile test's flow and checks with fingerprinting."""
  flow = "FingerprintFile"

  def setUp(self):
    # NOTE(review): deliberately does not call super().setUp() -- confirm
    # the mixin ordering if cooperative setUp is ever required.
    self.urn = self.client_id.Add(self.output_path)
    self.DeleteUrn(self.urn)
    self.assertRaises(AssertionError, self.CheckFlow)

  def CheckFlow(self):
    fd = aff4.FACTORY.Open(self.urn)
    fp = fd.Get(fd.Schema.FINGERPRINT)
    self.assertNotEqual(fp, None)
    results = list(fp.results)
    self.assertGreater(len(results), 0)
    result = results[0]
    # Raw digest lengths: MD5=16, SHA1=20, SHA256=32 bytes.
    self.assertTrue("md5" in result)
    self.assertEqual(len(result["md5"]), 16)
    self.assertTrue("sha1" in result)
    self.assertEqual(len(result["sha1"]), 20)
    self.assertTrue("sha256" in result)
    self.assertEqual(len(result["sha256"]), 32)
class TestFingerprintFileOSLinux(FingerPrintTestBase, TestGetFileOSLinux):
  """Tests if Fingerprinting works on Linux."""
  # MRO: FingerPrintTestBase supplies flow/setUp/CheckFlow; TestGetFileOSLinux
  # supplies platforms/args/output_path and the tearDown cleanup.
class TestFingerprintFileOSWindows(FingerPrintTestBase, TestGetFileOSWindows):
  """Tests if Fingerprinting works on Windows."""
  # MRO: FingerPrintTestBase supplies flow/setUp/CheckFlow;
  # TestGetFileOSWindows supplies platforms/args/output_path.
class TestAnalyzeClientMemory(ClientTestBase):
  """Runs the Volatility pslist plugin against client memory."""
  platforms = ["windows"]
  flow = "AnalyzeClientMemory"
  args = {"request": rdfvalue.VolatilityRequest(plugins=["pslist"],
                                                args={"pslist": {}}),
          "output": "analysis/pslist/testing"}

  def setUp(self):
    super(TestAnalyzeClientMemory, self).setUp()
    # Start from a clean slate and prove the check fails without results.
    self.urn = self.client_id.Add(self.args["output"])
    self.DeleteUrn(self.urn)
    self.assertRaises(AssertionError, self.CheckFlow)

  def CheckFlow(self):
    response = aff4.FACTORY.Open(self.urn, token=self.token)
    self.assertIsInstance(response, aff4.RDFValueCollection)
    self.assertEqual(len(response), 1)
    result = response[0]
    self.assertEqual(result.error, "")
    self.assertGreater(len(result.sections), 0)

    rows = result.sections[0].table.rows
    self.assertGreater(len(rows), 0)

    # The GRR client binary must appear in the process table.
    expected_name = self.GetGRRBinaryName()
    for values in rows:
      for value in values.values:
        if value.name == "ImageFileName":
          if expected_name == value.svalue:
            return

    self.fail("Process listing does not contain %s." % expected_name)
class TestGrepMemory(ClientTestBase):
  """Greps client memory for a literal and verifies the single hit."""
  platforms = ["windows", "darwin"]
  flow = "ScanMemory"

  def setUp(self):
    # args is built here (not as a class attribute) before super().setUp().
    self.args = {"also_download": False,
                 "grep": rdfvalue.BareGrepSpec(
                     literal="grr", length=4*1024*1024*1024,
                     mode=rdfvalue.GrepSpec.Mode.FIRST_HIT,
                     bytes_before=10, bytes_after=10),
                 "output": "analysis/grep/testing",}
    super(TestGrepMemory, self).setUp()
    self.urn = self.client_id.Add(self.args["output"])
    self.DeleteUrn(self.urn)
    self.assertRaises(AssertionError, self.CheckFlow)

  def CheckFlow(self):
    collection = aff4.FACTORY.Open(self.urn, token=self.token)
    self.assertIsInstance(collection, aff4.RDFValueCollection)
    # FIRST_HIT mode: exactly one reference expected.
    self.assertEqual(len(list(collection)), 1)
    reference = collection[0]

    # 10 bytes of context either side of the 3-byte literal => length 23.
    self.assertEqual(reference.length, 23)
    self.assertEqual(reference.data[10:10+3], "grr")
class TestLaunchBinaries(ClientTestBase):
  """Test that we can launch a binary.

  The following program is used and will be signed and uploaded before
  executing the test if it hasn't been uploaded already.

  #include <stdio.h>
  int main(int argc, char** argv) {
    printf("Hello world!!!");
    return 0;
  };
  """
  platforms = ["windows", "linux"]
  flow = "LaunchBinary"
  filenames = {"windows": "hello.exe",
               "linux": "hello"}

  def __init__(self, **kwargs):
    super(TestLaunchBinaries, self).__init__(**kwargs)
    self.context = ["Platform:%s" % self.platform.title()]
    self.binary = config_lib.CONFIG.Get(
        "Executables.aff4_path", context=self.context).Add(
            "test/%s" % self.filenames[self.platform])

    self.args = dict(binary=self.binary)

    # Upload the signed test binary if it is not in the Executables area yet.
    try:
      aff4.FACTORY.Open(self.binary, aff4_type="GRRSignedBlob",
                        token=self.token)
    except IOError:
      print "Uploading the test binary to the Executables area."
      source = os.path.join(config_lib.CONFIG["Test.data_dir"],
                            self.filenames[self.platform])

      maintenance_utils.UploadSignedConfigBlob(
          open(source, "rb").read(), aff4_path=self.binary,
          client_context=self.context, token=self.token)

  def CheckFlow(self):
    # Check that the test binary returned the correct stdout:
    fd = aff4.FACTORY.Open(self.session_id, age=aff4.ALL_TIMES,
                           token=self.token)
    logs = "\n".join(
        [str(x) for x in fd.GetValuesForAttribute(fd.Schema.LOG)])

    self.assertTrue("Hello world" in logs)
class TestCollector(ClientTestBase):
    """Run the artifact collector for VolatilityPsList on Windows."""
    platforms = ["windows"]
    flow = "ArtifactCollectorFlow"
    args = {"output": "analysis/artifact/testing",
            "artifact_list": ["VolatilityPsList"],
            "store_results_in_aff4": False}

    def setUp(self):
        super(TestCollector, self).setUp()
        self.urn = self.client_id.Add(self.args["output"])
        self.DeleteUrn(self.urn)

    def CheckFlow(self):
        """A realistic process listing has well over 40 entries."""
        results = aff4.FACTORY.Open(self.urn, token=self.token)
        self.assertIsInstance(results, aff4.RDFValueCollection)
        self.assertTrue(len(list(results)) > 40)
class TestSearchFiles(ClientTestBase):
    """Search for /bin/ls* on Linux and download every match."""
    platforms = ["linux"]
    flow = "SearchFileContent"
    args = {"output": "analysis/SearchFiles/testing",
            "paths": ["/bin/ls*"],
            "also_download": True}

    def setUp(self):
        super(TestSearchFiles, self).setUp()
        self.urn = self.client_id.Add(self.args["output"])
        self.DeleteUrn(self.urn)
        # Without any results the check must fail, proving it inspects
        # fresh data from this run.
        self.assertRaises(Exception, self.CheckFlow)

    def CheckFlow(self):
        hits = aff4.FACTORY.Open(self.urn, token=self.token)
        self.assertGreater(len(hits), 1)
        for hit in hits:
            self.assertTrue(hit.pathspec.path.startswith("/bin/ls"))
class TestSearchFilesGrep(TestSearchFiles):
    """Same as TestSearchFiles but additionally greps for "ELF"."""
    args = {"output": "analysis/SearchFilesGrep/testing",
            "paths": ["/bin/ls*"],
            "grep": rdfvalue.BareGrepSpec(literal="ELF"),
            "also_download": True}

    def CheckFlow(self):
        hits = aff4.FACTORY.Open(self.urn, token=self.token)
        self.assertGreater(len(hits), 1)
        for hit in hits:
            # Every match must come from an ELF binary under /bin/ls*.
            self.assertTrue("ELF" in hit.data)
            self.assertTrue("ls" in hit.pathspec.path)
| |
# coding=utf-8
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Negative Sampling Skip-Gram Model for training contrastive word embeddings
Trains word embeddings on a text collection with focus on the contrast to other collections
about the same topic. Text collections are assumed to be similar and have a large overlap
in vocabulary.
This is a modified version of tensorflow/tensorflow/models/embedding/word2vec_optimized.py
The key modification lies in sampling not from the target text collection ("training data"),
but from all other text collections ("sample data") in a leave-one-out fashion.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
import tensorflow.python.platform
from six.moves import xrange # pylint: disable=redefined-builtin
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import cPickle as pkl
from tensorflow.models.embedding import gen_word2vec as word2vec
import codecs
# Command-line flags.  FLAGS is populated when tf.app.run() parses argv in
# main(); Options snapshots these values for the model.
flags = tf.app.flags

# --- I/O locations ---
flags.DEFINE_string("save_path", None, "Directory to write the model and "
                    "training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
                    "E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")

# --- Model geometry and training schedule ---
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
    "epochs_to_train", 15,
    "Number of epochs to train. Each epoch processes the training data once "
    "completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 5,
                     "Negative samples per training example.")
flags.DEFINE_integer("batch_size", 1,
                     "Number of training examples processed per step "
                     "(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
                     "The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
                     "The number of words to predict to the left and right "
                     "of the target word.")
flags.DEFINE_integer("min_count", 10,
                     "The minimum number of word occurrences for it to be "
                     "included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
                   "Subsample threshold for word occurrence. Words that appear "
                   "with higher frequency will be randomly down-sampled. Set "
                   "to 0 to disable.")

# --- Debugging / reporting knobs ---
flags.DEFINE_boolean(
    "interactive", False,
    "If true, enters an IPython interactive session to play with the trained "
    "model. E.g., try model.analogy('france', 'paris', 'russia') and "
    "model.nearby(['proton', 'elephant', 'maxwell']")
flags.DEFINE_integer("statistics_interval", 5,
                     "Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
                     "Save training summary to file every n seconds (rounded "
                     "up to statistics interval.")
flags.DEFINE_integer("checkpoint_interval", 600,
                     "Checkpoint the model (i.e. save the parameters) every n "
                     "seconds (rounded up to statistics interval.")
flags.DEFINE_string("name", "model", "Name to identify the model")

FLAGS = flags.FLAGS
class Options(object):
    """Options used by our word2vec model.

    A plain snapshot of the parsed FLAGS, so the model code never touches
    the flags module directly.
    """

    def __init__(self):
        # Model options.

        # Embedding dimension.
        self.emb_dim = FLAGS.embedding_size

        # Training options.

        # The training text file.
        self.train_data = FLAGS.train_data

        # Number of negative samples per example.
        self.num_samples = FLAGS.num_neg_samples

        # The initial learning rate.
        self.learning_rate = FLAGS.learning_rate

        # Number of epochs to train. After these many epochs, the learning
        # rate decays linearly to zero and the training stops.
        self.epochs_to_train = FLAGS.epochs_to_train

        # Concurrent training steps.
        self.concurrent_steps = FLAGS.concurrent_steps

        # Number of examples for one training step.
        self.batch_size = FLAGS.batch_size

        # The number of words to predict to the left and right of the target word.
        self.window_size = FLAGS.window_size

        # The minimum number of word occurrences for it to be included in the
        # vocabulary.
        self.min_count = FLAGS.min_count

        # Subsampling threshold for word occurrence.
        self.subsample = FLAGS.subsample

        # How often to print statistics.
        self.statistics_interval = FLAGS.statistics_interval

        # How often to write to the summary file (rounds up to the nearest
        # statistics_interval).
        self.summary_interval = FLAGS.summary_interval

        # How often to write checkpoints (rounds up to the nearest statistics
        # interval).
        self.checkpoint_interval = FLAGS.checkpoint_interval

        # Where to write out summaries.
        self.save_path = FLAGS.save_path

        # Model identifier used in checkpoint and output file names.
        self.name = FLAGS.name
class Word2Vec(object):
    """Word2Vec model (Skipgram) with leave-one-out negative sampling.

    Unlike stock word2vec, negative examples are drawn from the *other*
    text collections in the training file's directory ("sample data"), so
    the learned embeddings emphasise the contrast between the target
    collection and the rest.
    """

    def __init__(self, options, session):
        """Args:
          options: Options instance holding all hyper-parameters.
          session: tf.Session used to build and run the graph.
        """
        self._options = options
        self._session = session
        self._word2id = {}  # word -> id over the union of train+sample vocab
        self._id2word = {}  # id -> word, inverse of _word2id
        self.build_graph()
        self.build_eval_graph()
        self.save_vocab()

    def forward(self, examples, labels):
        """Build the graph for the forward pass.

        Args:
          examples: int tensor of center-word ids, shape [batch_size].
          labels: int tensor of context-word ids, shape [batch_size].

        Returns:
          (true_logits, sampled_logits) consumed by nce_loss().
        """
        opts = self._options

        # Declare all variables we need.
        # Embedding: [vocab_size, emb_dim]
        init_width = 0.5 / opts.emb_dim
        emb = tf.Variable(
            tf.random_uniform(
                [opts.vocab_size, opts.emb_dim], -init_width, init_width),
            name="emb")
        self._emb = emb

        # Softmax weight: [vocab_size, emb_dim]. Transposed.
        sm_w_t = tf.Variable(
            tf.zeros([opts.vocab_size, opts.emb_dim]),
            name="sm_w_t")

        # Softmax bias: [vocab_size].
        sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")

        # Global step: scalar, i.e., shape [].
        self.global_step = tf.Variable(0, name="global_step")

        # Nodes to compute the nce loss w/ candidate sampling.
        labels_matrix = tf.reshape(
            tf.cast(labels,
                    dtype=tf.int64),
            [opts.batch_size, 1])

        # Negative sampling.
        # NOTE: these prints dump the entire vocabularies -- useful on toy
        # corpora, extremely verbose on real data.
        print("Sample words %d %s " % (len(opts.sample_words), opts.sample_words))
        print("Vocab words %d %s " % (len(opts.vocab_words), opts.vocab_words))
        print("Sample counts %d %s " % (len(opts.sample_counts.tolist()), opts.sample_counts.tolist()))
        sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
            true_classes=labels_matrix,
            num_true=1,
            num_sampled=opts.num_samples,
            unique=True,
            range_max=len(opts.sample_words),  # NEW
            distortion=0.75,
            unigrams=opts.sample_counts.tolist()))  # NEW: only sample from other collections

        # Embeddings for examples: [batch_size, emb_dim]
        example_emb = tf.nn.embedding_lookup(emb, examples)

        # Weights for labels: [batch_size, emb_dim]
        true_w = tf.nn.embedding_lookup(sm_w_t, labels)
        # Biases for labels: [batch_size, 1]
        true_b = tf.nn.embedding_lookup(sm_b, labels)

        # Weights for sampled ids: [num_sampled, emb_dim]
        sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
        # Biases for sampled ids: [num_sampled, 1]
        sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)

        # True logits: [batch_size, 1]
        true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b

        # Sampled logits: [batch_size, num_sampled]
        # We replicate sampled noise labels for all examples in the batch
        # using the matmul.
        sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
        sampled_logits = tf.matmul(example_emb,
                                   sampled_w,
                                   transpose_b=True) + sampled_b_vec
        return true_logits, sampled_logits

    def nce_loss(self, true_logits, sampled_logits):
        """Build the graph for the NCE loss."""
        # cross-entropy(logits, labels)
        opts = self._options
        true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            true_logits, tf.ones_like(true_logits))
        sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
            sampled_logits, tf.zeros_like(sampled_logits))

        # NCE-loss is the sum of the true and noise (sampled words)
        # contributions, averaged over the batch.
        nce_loss_tensor = (tf.reduce_sum(true_xent) +
                           tf.reduce_sum(sampled_xent)) / opts.batch_size
        return nce_loss_tensor

    def optimize(self, loss):
        """Build the graph to optimize the loss function."""
        # Optimizer nodes.
        # Linear learning rate decay with the number of processed words,
        # floored at 0.01% of the initial rate.
        opts = self._options
        words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
        lr = opts.learning_rate * tf.maximum(
            0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
        self._lr = lr
        optimizer = tf.train.GradientDescentOptimizer(lr)
        train = optimizer.minimize(loss,
                                   global_step=self.global_step,
                                   gate_gradients=optimizer.GATE_NONE)
        self._train = train

    def build_graph(self):
        """Build the graph for the full model.

        Reads the training file, concatenates all sibling .txt/.tok files
        into the "sample" corpus, and wires the skip-gram pipeline so that
        examples/labels come from the sample data while negatives follow the
        sample unigram counts.
        """
        opts = self._options
        # The training data. A text file.
        (words, counts, words_per_epoch, self._epoch, self._words, examples,
         labels) = word2vec.skipgram(filename=opts.train_data,
                                     batch_size=opts.batch_size,
                                     window_size=opts.window_size,
                                     min_count=opts.min_count,
                                     subsample=opts.subsample)

        ### NEW: read sampling corpus (= all files in same dir as train_data
        ### except for the training data itself).
        full_path = os.path.realpath(opts.train_data)
        path, filename = os.path.split(full_path)
        sampling_files = []
        for fname in os.listdir(path):
            # BUGFIX: the original condition was
            #   fname.endswith(".txt") or fname.endswith(".tok") and fname != filename
            # where "and" binds tighter than "or", so a ".txt" training file
            # was wrongly included in its own sampling corpus.
            if fname.endswith((".txt", ".tok")) and fname != filename:
                sampling_files.append(path + "/" + fname)
        print("Files for sampling: ", ", ".join(sampling_files))

        # Write two derived files: sample_data = concat of all sampling
        # files; sample_train_data additionally appends the training data
        # (used only to build the union vocabulary).
        sample_data = opts.train_data + ".sample"
        sample_train_data = sample_data + ".train"
        o = codecs.open(sample_data, "w", "utf8")
        oo = codecs.open(sample_train_data, "w", "utf8")
        for sampling_file in sampling_files:
            f = open(sampling_file, "r")
            t = f.read()
            o.write(t.decode("utf8") + " ")  # concat all files
            oo.write(t.decode("utf8") + " ")
            f.close()
        o.close()
        train_fd = codecs.open(opts.train_data, "r", "utf8")
        # BUGFIX: codecs.open already decodes to unicode; the original called
        # .decode("utf8") again, which in Python 2 round-trips through ASCII
        # and crashes on any non-ASCII training text.
        oo.write(train_fd.read())
        train_fd.close()
        oo.close()

        # The sampling data. A text file.
        (words_samples, counts_samples, words_per_epoch_samples, b_epoch_samples, b_words_samples, examples_samples,
         labels_samples) = word2vec.skipgram(filename=sample_data,
                                             batch_size=opts.batch_size,
                                             window_size=opts.window_size,
                                             min_count=opts.min_count,
                                             subsample=opts.subsample)
        # Sampling plus training data for getting full vocabulary for embeddings
        (words_samples_train, counts_samples_train, words_per_epoch_samples_train, b_epoch_samples_train, b_words_samples_train, examples_samples_train,
         labels_samples_train) = word2vec.skipgram(filename=sample_train_data,
                                                   batch_size=opts.batch_size,
                                                   window_size=opts.window_size,
                                                   min_count=opts.min_count,
                                                   subsample=opts.subsample)
        (opts.all_words, opts.all_counts,
         all_words_per_epoch) = self._session.run([words_samples_train, counts_samples_train, words_per_epoch])
        (opts.sample_words, opts.sample_counts,
         sample_words_per_epoch) = self._session.run([words_samples, counts_samples, words_per_epoch])
        # First add sample words, so sampled negative ids (which index the
        # sample vocabulary) line up with the front of _word2id.
        for s in opts.sample_words:
            last_index = len(self._word2id)
            self._word2id.setdefault(s, last_index)
        (opts.vocab_words, opts.vocab_counts,
         opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
        # Then add training words.
        for v in opts.vocab_words:
            last_index = len(self._word2id)
            self._word2id.setdefault(v, last_index)
        print("Word2id: ", self._word2id)
        opts.vocab_size = len(self._word2id)  # NOTE: wc20(train)+wc(sample) != wc20(train+sample) -> therefore use word2id (proper union)
        print("Sample file: ", sample_data)
        print("Data file: ", opts.train_data)
        print("Vocab size: ", opts.vocab_size - 1, " + UNK")
        print("Words per epoch: ", opts.words_per_epoch)
        # Train on examples/labels drawn from the sample corpus
        # (leave-one-out contrastive setup).
        self._examples = examples_samples
        self._labels = labels_samples
        for (w, i) in self._word2id.iteritems():
            self._id2word[i] = w
        print("id2word: ", self._id2word)
        true_logits, sampled_logits = self.forward(examples_samples, labels_samples)
        loss = self.nce_loss(true_logits, sampled_logits)
        tf.scalar_summary("NCE loss", loss)
        self._loss = loss
        self.optimize(loss)

        # Properly initialize all variables.
        tf.initialize_all_variables().run()

        self.saver = tf.train.Saver()

    def build_eval_graph(self):
        """Build the eval graph."""
        # Eval graph
        # Normalized word embeddings of shape [vocab_size, emb_dim].
        nemb = tf.nn.l2_normalize(self._emb, 1)

        # Nodes for computing neighbors for a given word according to
        # their cosine distance.
        nearby_word = tf.placeholder(dtype=tf.int32)  # word id
        nearby_emb = tf.gather(nemb, nearby_word)
        nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
        nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
                                             min(1000, self._options.vocab_size))
        self._nearby_word = nearby_word
        self._nearby_val = nearby_val
        self._nearby_idx = nearby_idx

    def save_vocab(self):
        """Save the vocabulary to a file so the model can be reloaded."""
        opts = self._options
        # NOTE(review): vocab_size is len(_word2id) (union of train+sample
        # vocabularies) while all_words comes from the concatenated file with
        # its own min_count cut-off; if their lengths ever disagree this loop
        # can raise IndexError -- confirm they stay in sync.
        with codecs.open(os.path.join(opts.save_path, "vocab.txt"), "w", "utf8") as f:
            for i in xrange(opts.vocab_size):
                f.write("%s %d\n" % (tf.compat.as_text(opts.all_words[i]),
                                     opts.all_counts[i]))

    def _train_thread_body(self):
        """Worker loop: run training steps until the epoch counter advances."""
        initial_epoch, = self._session.run([self._epoch])
        while True:
            _, epoch = self._session.run([self._train, self._epoch])
            if epoch != initial_epoch:
                break

    def train(self):
        """Train the model for one epoch using concurrent worker threads."""
        opts = self._options

        initial_epoch, initial_words = self._session.run([self._epoch, self._words])

        summary_op = tf.merge_all_summaries()
        summary_writer = tf.train.SummaryWriter(opts.save_path,
                                                graph_def=self._session.graph_def)
        workers = []
        for _ in xrange(opts.concurrent_steps):
            t = threading.Thread(target=self._train_thread_body)
            t.start()
            workers.append(t)

        last_words, last_time, last_summary_time = initial_words, time.time(), 0
        last_checkpoint_time = 0
        while True:
            time.sleep(opts.statistics_interval)  # Reports our progress once a while.
            (epoch, step, loss, words, lr) = self._session.run(
                [self._epoch, self.global_step, self._loss, self._words, self._lr])
            now = time.time()
            last_words, last_time, rate = words, now, (words - last_words) / (
                now - last_time)
            print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
                  (epoch, step, lr, loss, rate), end="")
            sys.stdout.flush()
            if now - last_summary_time > opts.summary_interval:
                summary_str = self._session.run(summary_op)
                summary_writer.add_summary(summary_str, step)
                last_summary_time = now
            if now - last_checkpoint_time > opts.checkpoint_interval:
                self.saver.save(self._session,
                                opts.save_path + "model",
                                global_step=step.astype(int))
                last_checkpoint_time = now
            if epoch != initial_epoch:
                break

        for t in workers:
            t.join()
        return epoch

    def nearby(self, words, num=40):
        """Prints out nearby words given a list of words."""
        # Unknown words fall back to id 0.
        ids = np.array([self._word2id.get(x, 0) for x in words])
        vals, idx = self._session.run(
            [self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
        for i in xrange(len(words)):
            print("\n%s\n=====================================" % (words[i]))
            for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
                print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
    """Open an interactive IPython shell seeded with local_ns plus globals()."""
    # Imported lazily so IPython is only required when the shell is used.
    import IPython
    namespace = {}
    if local_ns:
        namespace.update(local_ns)
    namespace.update(globals())
    IPython.start_ipython(argv=[], user_ns=namespace)
def plot_with_labels(low_dim_embs, labels, filename="plots/.tsne.png"):
    """Scatter-plot 2-D embeddings with one annotated text label per point."""
    assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
    plt.figure(figsize=(18, 18))  # in inches
    # zip stops at the shorter sequence (labels), matching the original
    # index-based loop over range(len(labels)).
    for label, (x, y) in zip(labels, low_dim_embs):
        plt.scatter(x, y)
        plt.annotate(label,
                     xy=(x, y),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.savefig(filename)
def main(_):
    """Train a word2vec model, save checkpoints/embeddings, plot a t-SNE map."""
    if not FLAGS.train_data or not FLAGS.save_path:
        print("--train_data and --save_path must be specified.")
        sys.exit(1)
    opts = Options()
    with tf.Graph().as_default(), tf.Session() as session:
        model = Word2Vec(opts, session)
        for _ in xrange(opts.epochs_to_train):
            model.train()  # Process one epoch
            # model.eval()  # Eval analogies.
        # Perform a final save.
        model.saver.save(session,
                         os.path.join(opts.save_path, opts.name + "." + "model.ckpt"),
                         global_step=model.global_step)
        model.nearby(['Switzerland'])
        # Project the final embeddings to 2-D for visualisation.
        tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
        plot_only = len(model._id2word)
        final_embeddings = model._emb.eval(session)
        # Persist embeddings and both vocabulary mappings for later reuse.
        # NOTE(review): the "embeddings/" and "dicts/" directories must
        # already exist -- confirm they are created elsewhere.
        pkl.dump(final_embeddings, open("embeddings/" + opts.name + ".emb.pkl", "wb"))
        pkl.dump(model._word2id, open("dicts/" + opts.name + ".w2i.pkl", "wb"))
        pkl.dump(model._id2word, open("dicts/" + opts.name + ".i2w.pkl", "wb"))
        low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :])
        print(zip(model._id2word.iteritems(), low_dim_embs))
        labels = [model._id2word[i] for i in xrange(plot_only)]
        plot_with_labels(low_dim_embs, labels, "plots/" + opts.name + ".tsne.png")
        if FLAGS.interactive:
            # E.g.,
            # [0]: model.analogy('france', 'paris', 'russia')
            # [1]: model.nearby(['proton', 'elephant', 'maxwell'])
            _start_shell(locals())
# Entry point: tf.app.run() parses FLAGS and then invokes main().
if __name__ == "__main__":
    tf.app.run()
| |
"""
HMC Monte-Carlo sampling from a power-law mass function
We assume a set of masses drawn from a power-law distribution. The distribution
is defined by 3 parameters: the index alpha, the lower and upper mass range
M_min, M_max.
theta = { alpha, M_min, M_max }
Let's presume known M_min and M_max such that we only look at the posterior of
alpha. We know that in fact M_max and M_min cannot be inferred because they are
most likely outside the mass range covered by the data set.
dN/dM = Z x M ** -alpha
where
Z = integral(dN/dM dM) over M_min, M_max
= (1 - alpha) / (M_max ** (1 - alpha) - M_min ** (1 - alpha))
we define beta = 1 - alpha
Z = beta / (M_max ** beta - M_min ** beta)
For this test, we neglect errors of stellar mass observations.
Least-square likelihood could be used in this case. However instead we can make
use that the mass function is the PDF of our data. Thus, We can derive the likelihood as
L(data; alpha) = prod_k Z x M[k] ** -alpha
and because it is numerically more stable and usually makes the math simpler we
define the log-likelihood lnL(Data; alpha) as
lnL(Data; alpha) = N ln(Z) - alpha * sum_k ln(M[k])
Note that the data set only enters via D = sum_k ln(M[k]), term that is
completely independent of the fit parameter. Hence this term can be computed
once and for all at beginning. Therefore, the length of the data set does not
slow down the computations. (This may not be true for non-perfectly known masses)
This experiment aims at implementing Hamiltonian Monte-Carlo method. This
method involves a stepsize as the traditional MH (Metropolis Hastings) method
but improves efficiency by updating this value using the local gradient of the
objective function lnL.
Hamiltonian Monte-Carlo makes use of the fact, that we can write our
log-likelihood as an energy similarly to dynamic systems:
L = exp( ln(L) ) = exp( -E )
So that E = - ln(L) is the energy of any point in the parameter space.
Maximizing the likelihood becomes finding the minimum state of energy.
The algorithm then uses Hamiltonian dynamics to modify the way how candidates
are proposed by computing the total energy of a local point by:
H = 1/2 * p ** 2 + E
where p ** 2 is the impulse (movement between 2 steps)
p is proposed by doing random leap frog steps locally using the gradient of the
objective function.
In our case the gradient function of the log-likelihood is:
d lnL / d theta = d lnL / d alpha
= -D - N / beta * [ 1 + Z * (ln(M_min) * M_min ** beta - ln(M_max) * M_max ** beta) ]
"""
import numpy as np
import random as random
import matplotlib.pyplot as plt
import time
from nuts import nuts6, NutsSampler_fn_wrapper
# Set the seeds for both RNGs used by this script: the stdlib `random`
# module and NumPy's global RNG.  random_PowerLaw draws its samples from
# np.random, so seeding only `random` (as before) left the synthetic data
# non-reproducible.
random.seed(1)
np.random.seed(1)
def random_PowerLaw(N, alpha, M_min, M_max):
    """
    Draw random samples from a power-law defined over M_min, M_max.

    dN/dM = Z x M ** -alpha

    Uses inverse-transform sampling: a uniform variate u in [0, 1) is
    pushed through the inverse CDF of the power law.

    INPUTS
    ------
    N: int
        number of samples.
    alpha: float
        power-law index.
    M_min: float
        lower bound of mass interval.
    M_max: float
        upper bound of mass interval.

    OUTPUTS
    -------
    masses: ndarray[float, ndim=1]
        list of N masses drawn from the power-law
    """
    beta = 1. - alpha
    u = np.random.uniform(0., 1., N)
    if beta != 0:
        return ((M_max ** beta - M_min ** beta) * u + M_min ** beta) ** (1. / beta)
    # beta == 0 (alpha == 1): the CDF is logarithmic, handled separately.
    return M_min * np.exp(np.log(M_max / M_min) * u)
def logLikelihood(theta, D, N, M_min, M_max):
    """
    Define logarithmic likelihood function.

    lnL(Data; alpha) = N ln(Z) - alpha * D

    theta: ndarray[float, ndim=1]
        array of fit params (theta[0] is alpha)
    D: float
        data normalization constant, sum_n log(M_n)
    N: int
        number of data points
    M_min: float
        lower limit of mass interval
    M_max: float
        upper limit of mass interval
    """
    alpha = theta[0]
    beta = 1.0 - alpha
    # Normalisation constant Z; the beta == 0 case (alpha == 1) degenerates
    # to a logarithmic integral.
    if beta != 0:
        c = beta / (M_max ** beta - M_min ** beta)
    else:
        c = np.log(M_max / M_min)
    # return log likelihood.
    return N * np.log(c) - alpha * D
def grad_logLikelihood(theta, D, N, M_min, M_max):
    """Gradient of the log-likelihood, d lnL / d theta (only dim: alpha).

    theta: ndarray[float, ndim=1]
        array of fit params (theta[0] is alpha)
    D: float
        data normalization constant, sum_n log(M_n)
    N: int
        number of data points
    M_min: float
        lower limit of mass interval
    M_max: float
        upper limit of mass interval
    """
    alpha = theta[0]  # extract alpha
    beta = 1.0 - alpha
    if beta == 0:
        # NOTE(review): the alpha == 1 limit returns N and ignores D; looks
        # like a placeholder -- confirm whether this branch is ever reached.
        return np.array([float(N)])
    # Convert limits from M to logM.
    log_mmin = np.log(M_min)
    log_mmax = np.log(M_max)
    g = log_mmin * M_min ** beta - log_mmax * M_max ** beta
    g = 1.0 + g * beta / (M_max ** beta - M_min ** beta)
    return np.array([-D - N * g / beta])
def test_nuts6():
    """End-to-end NUTS run on synthetic power-law data, with a histogram plot."""
    # Generate toy data.
    Nstars = int(1e5)
    alpha = 2.35
    M_min = 1.0
    M_max = 100.0
    Masses = random_PowerLaw(Nstars, alpha, M_min, M_max)
    # D = sum_k ln(M[k]), the only data-dependent term of the likelihood.
    D = np.mean(np.log(Masses)) * Nstars

    # NUTS pars
    M, Madapt = 1000, 1000
    theta0 = np.asarray([3.0])
    delta = 0.5
    nuts_fn = NutsSampler_fn_wrapper(logLikelihood, grad_logLikelihood,
                                     D, Nstars, M_min, M_max)
    # nuts_fn.verbose = True

    start = time.time()
    print("Starting Sampling at %s" % time.ctime(start))
    A, lnprob, epsilon = nuts6(nuts_fn, M, Madapt, theta0, delta, progress=True)
    stop = time.time()
    print("Sampling Completed in %0.2f seconds" % (stop - start))

    plt.figure(1)
    # Print Monte-Carlo estimate of alpha.
    print("Mean: " + str(np.mean(A)))
    per = np.percentile(A, [16, 50, 84])
    print("Alpha = {} (+{} / - {})".format(per[1], per[2] - per[1], per[1] - per[0]))
    counts, edges = np.histogram(A, 30)
    centers = 0.5 * (edges[:-1] + edges[1:])
    freq = counts.astype(float) / counts.sum()
    plt.step(centers, freq, color='b', lw=3, where='mid')
    plt.vlines(per, 0., max(freq), linestyle='--', color='b', lw=1)
    ylim = plt.ylim()
    plt.vlines(alpha, 0, ylim[1], color='r', lw=3)
    plt.ylim(ylim)
    plt.xlabel(r'$\alpha$', fontsize=24)
    plt.ylabel(r'$\cal L($Data$;\alpha)$', fontsize=24)
    plt.show()
# NUTSSampler (an emcee-like interface) is optional: if the installed `nuts`
# package does not provide it, the alternative demo below is skipped.
try:
    from nuts import NUTSSampler

    def test_emcee_nuts6():
        """Run the same toy power-law fit through the NUTSSampler wrapper."""
        # Generate toy data.
        Nstars = int(1e5)
        alpha = 2.35
        M_min = 1.0
        M_max = 100.0
        Masses = random_PowerLaw(Nstars, alpha, M_min, M_max)
        LogM = np.log(Masses)
        # D = sum_k ln(M[k]), the only data-dependent likelihood term.
        D = np.mean(LogM) * Nstars

        # NUTS pars
        M, Madapt = 1000, 1000
        theta0 = np.asarray([3.0])
        delta = 0.6

        # nuts_fn = NutsSampler_fn_wrapper(logLikelihood, grad_logLikelihood, D, Nstars, M_min, M_max)
        # nuts_fn.verbose = True
        sampler = NUTSSampler(len(theta0), logLikelihood, grad_logLikelihood, D, Nstars, M_min, M_max)

        t_start = time.time()
        print("Starting Sampling at %s" % time.ctime(t_start))
        A = sampler.run_mcmc(theta0, M, Madapt, delta)
        t_stop = time.time()
        print("Sampling Completed in %0.2f seconds" % (t_stop - t_start))

        plt.figure(1)
        # Print Monte-Carlo estimate of alpha.
        print("Mean: " + str(np.mean(A)))
        per = np.percentile(A, [16, 50, 84])
        print("Alpha = {} (+{} / - {})".format( per[1], per[2] - per[1], per[1] - per[0] ))
        # Normalised histogram of the posterior samples.
        n, b = np.histogram(A, 30)
        x = 0.5 * (b[:-1] + b[1:])
        y = n.astype(float) / n.sum()
        plt.step(x, y, color='b', lw=3, where='mid')
        plt.vlines(per, 0., max(y), linestyle='--', color='b', lw=1)
        ylim = plt.ylim()
        plt.vlines(alpha, 0, ylim[1], color='r', lw=3)
        plt.ylim(ylim)
        plt.xlabel(r'$\alpha$', fontsize=24)
        plt.ylabel(r'$\cal L($Data$;\alpha)$', fontsize=24)
        plt.show()
except ImportError:
    pass
# Run the plain nuts6 demo by default; the NUTSSampler variant stays disabled.
if __name__ == '__main__':
    test_nuts6()
    # test_emcee_nuts6()
| |
from decimal import Decimal
import logging
import math
from build.management.commands.base_build import Command as BaseBuild
from protein.models import ProteinCouplings
from ligand.models import Ligand, BiasedExperiment, AnalyzedExperiment,AnalyzedAssay
MISSING_PROTEINS = {}
SKIPPED = 0
class Command(BaseBuild):
# Class-level logger wired to a file handler: the logger accepts INFO and
# above, but only ERROR and above are persisted to biasDataTest.log.
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
# Django management-command help text.
help = 'Reads bias data and imports it'
# Caches shared across a single command invocation.
publication_cache = {}
ligand_cache = {}
data_all = []
def add_arguments(self, parser):
    """Register the CLI options of the bias-data import command."""
    # (flag tuple) -> keyword arguments; applied in one loop so the option
    # set is easy to scan and extend.
    option_specs = [
        (('-p', '--proc'),
         dict(type=int,
              action='store',
              dest='proc',
              default=1,
              help='Number of processes to run')),
        (('-f', '--filename'),
         dict(action='append',
              dest='filename',
              help='Filename to import. Can be used multiple times')),
        (('-u', '--purge'),
         dict(action='store_true',
              dest='purge',
              default=False,
              help='Purge existing bias records')),
        (('--test_run',),
         dict(action='store_true',
              help='Skip this during a test run',
              default=False)),
    ]
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
def handle(self, *args, **options):
    """Command entry point: optionally purge, then (re)build the bias data."""
    # Test runs skip the import entirely.
    if options['test_run']:
        print('Skipping in test run')
        return
    # Optionally delete any existing analyzed-experiment data first.
    if options['purge']:
        try:
            print('Started purging bias data')
            self.purge_bias_data()
            print('Ended purging bias data')
        except Exception as err:
            # Purging is best-effort; report and continue with the import.
            print(err)
    # Import the structure data.
    print('CREATING BIAS DATA')
    print(options['filename'])
    self.build_bias_data()
    self.build_bias_data_subtypes()
    self.logger.info('COMPLETED CREATING BIAS DATA')
# pylint: disable=R0201
def purge_bias_data(self):
    """Delete every AnalyzedExperiment row (related rows cascade per model FKs)."""
    AnalyzedExperiment.objects.all().delete()
    self.logger.info('Data is purged')
def get_from_model(self):
    """Fetch all biased experiments with their related rows prefetched.

    Returns a queryset ordered by publication, receptor and ligand, or
    None when the lookup fails.
    """
    content = None
    try:
        content = (BiasedExperiment.objects.all()
                   .prefetch_related('experiment_data', 'ligand', 'receptor',
                                     'publication', 'publication__web_link',
                                     'experiment_data__emax_ligand_reference')
                   .order_by('publication', 'receptor', 'ligand'))
    except BiasedExperiment.DoesNotExist:
        # NOTE(review): building a queryset is lazy and does not raise
        # DoesNotExist, so this guard likely never fires -- confirm intent.
        self.logger.info('Data is not returned')
    return content
def process_data(self, content):
    """Flatten experiment rows into dicts with authors, children and vendors.

    Args:
        content: iterable of BiasedExperiment rows (see get_from_model).

    Returns:
        list of dicts with keys 'main', 'authors', 'children',
        'vendor_counter'.
    """
    rd = []
    counter = 0  # NOTE(review): incremented below but never read -- confirm.
    for instance in enumerate(content):
        temp_obj = []
        fin_obj = {}
        # instance is an (index, experiment) tuple; only the object is used.
        fin_obj['main'] = (instance[1])
        vendor_counter = 0
        vendors_quantity = None
        for i in instance[1].experiment_data_vendors.all():
            vendor_counter = vendor_counter + 1
            # NOTE(review): vendor_counter is >= 1 at this point, so this
            # branch can never run and vendors_quantity stays None; the
            # intended test was probably "if not vendors_quantity" -- confirm.
            if not vendor_counter:
                vendors_quantity = i
                self.logger.info(vendors_quantity)
        for entry in instance[1].experiment_data.all():
            author_list = list()
            for author in entry.experiment_data_authors.all():
                author_list.append(author.author)
            temp_obj.append(entry)
            counter += 1
            # NOTE(review): overwritten each iteration, so only the authors
            # of the last experiment_data entry are kept -- confirm intended.
            fin_obj['authors'] = author_list
        fin_obj['children'] = temp_obj
        fin_obj['vendor_counter'] = vendor_counter
        rd.append(fin_obj)
    self.logger.info('Return dict is returned')
    return rd
def queryset_to_dict(self, results):
    '''
    Merge bias experiment data with assay data.

    For every processed experiment dict (see process_data) builds one flat
    entry with publication/receptor metadata plus a list of per-assay dicts
    under 'assay'.  NOTE(review): all assay fields are read from
    j['children'][0] only, i.e. the first experiment_data row; confirm that
    additional children are intentionally ignored here.
    '''
    send = list()
    for j in results:
        temp_dict = dict()
        temp = dict()
        doubles = []
        # Experiment-level metadata.
        temp['ligand_source_id'] = None
        temp['publication'] = j['main'].publication
        temp['species'] = j['main'].receptor.species.common_name
        # temp['ligand'] = j['main'].ligand
        temp['endogenous_ligand'] = j['main'].endogenous_ligand
        temp['receptor'] = j['main'].receptor
        temp['assay'] = dict()
        temp['vendor_counter'] = j['vendor_counter']
        temp['reference'] = list()
        temp['authors'] = j['authors']
        temp['ref_ligand_experiment'] = dict()
        temp['article_quantity'] = 0
        temp['labs_quantity'] = 0
        temp['ligand_source_id'] = j['main'].ligand_source_id
        temp['ligand_source_type'] = j['main'].ligand_source_type
        temp['reference_ligand'] = None
        # Experiments without assay rows are dropped entirely.
        if not j['children']:
            continue
        # Assay-level data, taken from the first child row.
        temp_dict['potency'] = ''
        temp_dict['t_factor'] = ''
        temp_dict['log_bias_factor'] = ''
        temp_dict['order_no'] = 0
        temp_dict['reference_ligand'] = None
        temp_dict['signalling_protein'] = j['children'][0].signalling_protein.lower()
        temp_dict['cell_line'] = j['children'][0].cell_line
        temp_dict['family'] = j['children'][0].family
        temp_dict['assay_type'] = j['children'][0].assay_type
        temp_dict['assay_measure_method'] = j['children'][0].assay_measure
        temp_dict['assay_time_resolved'] = j['children'][0].assay_time_resolved
        if j['children'][0].quantitive_activity:
            temp_dict['quantitive_activity'] = j['children'][0].quantitive_activity
            temp_dict['quantitive_activity_initial'] = j['children'][0].quantitive_activity
        else:
            temp_dict['quantitive_activity'] = None
            temp_dict['quantitive_activity_initial'] = None
        temp_dict['qualitative_activity'] = j['children'][0].qualitative_activity
        temp_dict['quantitive_unit'] = j['children'][0].quantitive_unit
        temp_dict['quantitive_efficacy'] = j['children'][0].quantitive_efficacy
        temp_dict['efficacy_unit'] = j['children'][0].efficacy_unit
        temp_dict['quantitive_measure_type'] = j['children'][0].quantitive_measure_type
        temp_dict['efficacy_measure_type'] = j['children'][0].efficacy_measure_type
        temp_dict['t_coefficient'] = j['children'][0].bias_value
        temp_dict['t_coefficient_initial'] = j['children'][0].bias_value_initial
        temp_dict['bias_reference'] = j['children'][0].bias_reference
        temp_dict['emax_reference_ligand'] = j['children'][0].emax_ligand_reference
        temp_dict['ligand_function'] = j['children'][0].ligand_function
        temp_dict['ligand'] = j['main'].ligand
        # Convert the initial activity to its negative log10 (two decimals),
        # except for single-point measurements which are kept as-is.
        if (temp_dict['quantitive_activity_initial'] and
                temp_dict['quantitive_measure_type'] != "Effect at single point measurement"):
            temp_dict['quantitive_activity_initial'] = (-1) * math.log10(
                temp_dict['quantitive_activity_initial'])
            temp_dict['quantitive_activity_initial'] = "{:.2F}".format(
                Decimal(temp_dict['quantitive_activity_initial']))
        temp['ref_ligand_experiment'] = j['children'][0].emax_ligand_reference
        doubles.append(temp_dict)
        temp['assay'] = doubles
        send.append(temp)
    self.logger.info('Queryset processed')
    return send
def combine_unique(self, data):
    '''
    Merge experiments that share the same publication/receptor pair.

    Entries are keyed by "<publication id>//<receptor id>"; when several
    experiments collide on a key their 'assay' lists are concatenated and
    the later entry's other fields win.
    '''
    merged = dict()
    for entry in data:
        # NOTE(review): the doubled '/' reproduces the original key format
        # (an empty middle segment) -- confirm it is intentional.
        key = '{}//{}'.format(entry['publication'].id, entry['receptor'].id)
        if key in merged:
            combined_assays = merged[key]['assay']
        else:
            combined_assays = list()
        combined_assays.extend(entry['assay'])
        merged[key] = entry
        merged[key]['assay'] = combined_assays
    self.logger.info('Combined experiments by publication and receptor')
    return merged
def process_referenced_assays(self, data):
    '''
    Split every experiment's 'assay' list into tested and reference
    assays, stored under 'assay_list' and 'reference_assays_list'.
    Mutates and returns *data*.
    '''
    for _, experiment in data.items():
        tested, references = self.return_refenced_assays(experiment['assay'])
        experiment['assay_list'] = tested
        experiment['reference_assays_list'] = references
    return data
def return_refenced_assays(self, assays):
    # pylint: disable=no-member
    """Split *assays* into (tested, reference) lists.

    Assays with a non-empty 'bias_reference' marker are reference assays;
    everything else is a tested assay.  Tested assays are returned sorted
    by 'quantitive_activity' descending (falsy values sort as 999999).
    """
    main, reference = list(), list()
    for assay in assays:
        if assay['bias_reference'] != '':
            reference.append(assay)
        else:
            main.append(assay)
    sorted_main = sorted(main, key=lambda k: k['quantitive_activity']
                         if k['quantitive_activity'] else 999999, reverse=True)
    sorted_reference = reference
    if len(sorted_reference) == 0:
        # NOTE(review): the return value of get_reference_from_emax() is
        # discarded, so sorted_reference stays empty here -- confirm whether
        # the result was meant to be assigned.
        self.get_reference_from_emax(assays)
    # if len(sorted_reference) == 0:
    #     print('implementation required')
    return sorted_main, sorted_reference
def filter_reference_assay(self, reference_return, reference_ligand):
    """Filter *reference_return* in place, keeping only entries whose
    'ligand' equals *reference_ligand*; returns the same list."""
    reference_return[:] = list(
        filter(lambda entry: entry.get('ligand') == reference_ligand,
               reference_return))
    self.logger.info('Trying to get reference from assays')
    return reference_return
def filter_assay_reference(self, assay_return, reference_ligand):
    """Filter *assay_return* in place, dropping entries whose 'ligand'
    equals *reference_ligand*; returns the same list."""
    assay_return[:] = list(
        filter(lambda entry: entry.get('ligand') != reference_ligand,
               assay_return))
    self.logger.info('Trying to get filtering references from assays')
    return assay_return
def chose_reference_from_assays(self, assays):
    '''
    Pick a reference ligand and split *assays* around it.

    The reference ligand comes from the first assay (in list order) with a
    truthy quantitative activity, a truthy quantitative efficacy and a
    non-None ligand -- the original reversed loop kept overwriting the
    variable, so its final value was exactly this element.  Returns
    (reference_assays, remaining_assays); both empty when nothing
    qualifies.

    Improvement: the original recomputed both filter passes on every
    qualifying iteration (O(n*m)); now the candidate is found once and the
    filters run once.  The redundant `x and x is not None` checks and the
    unused Ligand() placeholder are gone.
    '''
    references = list()
    final_assay = list()
    reference_ligand = None
    for candidate in assays:
        # Truthiness mirrors the original check: zero/None activity or
        # efficacy disqualifies the assay.
        if (candidate['quantitive_activity'] and
                candidate['quantitive_efficacy'] and
                candidate['ligand'] is not None):
            reference_ligand = candidate['ligand']
            break
    if reference_ligand is not None:
        references = self.filter_reference_assay(
            assays.copy(), reference_ligand)
        final_assay = self.filter_assay_reference(
            assays.copy(), reference_ligand)
    self.logger.info('return reference assay')
    return references, final_assay
def get_reference_from_emax(self, assays):
    """Return the assays whose ligand is its own Emax reference ligand."""
    matches = [assay for assay in assays
               if assay['emax_reference_ligand'] == assay['ligand']]
    self.logger.info('return reference emax')
    return matches
def separate_ligands(self, context):
    """Re-key experiments per ligand.

    Expands each publication/receptor entry into one entry per ligand,
    keyed "<publication id>/<ligand id>/<receptor id>", copying the
    experiment-level metadata and grouping that ligand's assays under
    'assay_list'.
    """
    content = dict()
    for i in context.items():
        for assay in i[1]['assay_list']:
            name = str(i[1]['publication'].id) + \
                '/' + str(assay['ligand'].id) + '/' + str(i[1]['receptor'].id)
            if name in content:
                content[name]['assay_list'].append(assay)
            else:
                # First assay for this ligand: copy experiment metadata over.
                content[name] = dict()
                content[name]['publication'] = i[1]['publication']
                content[name]['ligand'] = assay['ligand']
                content[name]['endogenous_ligand'] = i[1]['endogenous_ligand']
                content[name]['receptor'] = i[1]['receptor']
                content[name]['vendor_counter'] = i[1]['vendor_counter']
                content[name]['authors'] = i[1]['authors']
                content[name]['ref_ligand_experiment'] = i[1]['ref_ligand_experiment']
                content[name]['article_quantity'] = i[1]['article_quantity']
                content[name]['labs_quantity'] = i[1]['labs_quantity']
                content[name]['assay_list'] = list()
                content[name]['ligand_source_type'] = i[1]['ligand_source_type']
                content[name]['ligand_source_id'] = i[1]['ligand_source_id']
                content[name]['assay_list'].append(assay)
                content[name]['reference_assays_list'] = i[1]['reference_assays_list']
                content[name]['assay'] = i[1]['assay']
    return content
def limit_family_set(self, assay_list):
    # pylint: disable=no-member
    """Reduce *assay_list* to one assay per 'family', keeping the entry
    with the lowest quantitive_activity.  Comparisons that raise TypeError
    (e.g. None activities) leave the incumbent in place."""
    best_per_family = list()
    seen_families = set()
    for candidate in assay_list:
        family = candidate['family']
        if family not in seen_families:
            seen_families.add(family)
            best_per_family.append(candidate)
            continue
        incumbent = next(entry for entry in best_per_family
                         if entry["family"] == family)
        try:
            replace = candidate['quantitive_activity'] < incumbent['quantitive_activity']
        except TypeError:
            self.logger.info('skipping families if existing copy')
            continue
        if replace:
            best_per_family = [entry for entry in best_per_family
                               if entry.get('family') != family]
            best_per_family.append(candidate)
    return best_per_family
def limit_family_set_subs(self, assay_list):
    """Reduce *assay_list* to one assay per 'signalling_protein', keeping
    the entry with the lowest quantitive_activity.

    NOTE(review): unlike limit_family_set(), the except branch appends the
    duplicate assay instead of skipping it, so entries that fail the
    comparison (e.g. None activity) produce duplicates here -- confirm
    whether this asymmetry is intended.
    """
    families = list()
    proteins = set()
    for assay in assay_list:
        if assay['signalling_protein'] not in proteins:
            proteins.add(assay['signalling_protein'])
            families.append(assay)
        else:
            compare_val = next(item for item in families if item["signalling_protein"] == assay['signalling_protein'])
            try:
                if assay['quantitive_activity'] < compare_val['quantitive_activity']:
                    families[:] = [d for d in families if d.get('signalling_protein') != compare_val['signalling_protein']]
                    families.append(assay)
            except:
                families.append(assay)
                self.logger.info('limit_family_set_subs error')
    return families
def process_calculation(self, context):
    """Deduplicate, rank and annotate each experiment's assays.

    For every experiment: removes duplicate assay dicts, sorts them by
    quantitive_activity ascending (falsy values sort as 999999) into
    'biasdata' with an 'order_no' rank (0 = most potent), computes log
    bias factors against the reference assays, re-validates negative
    factors, and derives potency / transduction coefficients in place.
    """
    for i in context.items():
        test = dict()
        temp_obj = list()
        # Drop duplicate assay dicts while preserving order.
        for j in i[1]['assay_list']:
            if j not in temp_obj:
                temp_obj.append(j)
            else:
                pass
        i[1]['assay_list'] = temp_obj
        test = sorted(i[1]['assay_list'], key=lambda k: k['quantitive_activity']
                      if k['quantitive_activity'] else 999999, reverse=False)
        # Rank assays: order_no 0 is the most potent pathway.
        for item in enumerate(test):
            item[1]['order_no'] = item[0]
        i[1]['biasdata'] = test
        i[1]['reference_lists'] = list()
        i[1].pop('assay_list')
        # calculate log bias
        i[1]['reference_lists'] = self.calc_bias_factor(i[1]['biasdata'], i[1]['reference_assays_list'], i[1]['assay'])
        # recalculates lbf if it is negative
        i[1]['biasdata'] = self.validate_lbf(i)
        self.calc_potency_and_transduction(i[1]['biasdata'])
    return context
# pylint: disable=C0301
def calc_bias_factor(self, biasdata, reference, assay):
    """Compute per-assay log bias factors relative to the most potent assay.

    For EC50 measurements the factor is (a-b)-(c-d) over log10(Emax/EC50)
    terms of the most potent assay, its reference, the current assay and
    its reference.  IC50-only comparisons and qualitative activities get
    descriptive string labels instead.  Returns the list of matched
    reference assays (most potent pathway first).

    NOTE(review): the *assay* parameter is currently unused here.
    """
    reference_lists = list()
    most_reference = dict()
    most_potent = dict()
    for i in biasdata:
        if i['order_no'] == 0:
            most_potent = i
            most_reference = self.get_reference_assay(reference, most_potent)
            reference_lists.append(most_reference)
            i['log_bias_factor'] = None
    for i in biasdata:
        if i['order_no'] != 0:
            temp_reference = self.get_reference_assay(reference, i)
            reference_lists.append(temp_reference)
            try:
                if (i['quantitive_measure_type'].lower() == 'ec50' and temp_reference['quantitive_measure_type'].lower() == 'ec50' and
                        most_potent['quantitive_measure_type'].lower() == 'ec50' and most_reference['quantitive_measure_type'].lower() == 'ec50'):
                    a = 0
                    b = 0
                    c = 0
                    d = 0
                    # log10(Emax / EC50) for the most potent pathway, its
                    # reference, the current pathway and its reference.
                    a = math.log10(
                        most_potent['quantitive_efficacy'] / most_potent['quantitive_activity'])
                    b = math.log10(
                        most_reference['quantitive_efficacy'] / most_reference['quantitive_activity'])
                    c = math.log10(
                        i['quantitive_efficacy'] / i['quantitive_activity'])
                    d = math.log10(
                        temp_reference['quantitive_efficacy'] / temp_reference['quantitive_activity'])
                    temp_calculation = self.caclulate_bias_factor_variables(
                        a, b, c, d)
                    i['log_bias_factor'] = round(temp_calculation, 1)
                elif (i['quantitive_measure_type'].lower() == 'ic50' and temp_reference['quantitive_measure_type'].lower() == 'ic50'):
                    i['log_bias_factor'] = 'Only agonist in main pathway'
            except:
                # Missing/None quantitative fields fall back to labels
                # derived from the qualitative activity.
                try:
                    if i['qualitative_activity'] == 'No activity':
                        i['log_bias_factor'] = "Full Bias"
                    elif i['qualitative_activity'] == 'Low activity':
                        i['log_bias_factor'] = "High Bias"
                    elif i['qualitative_activity'] == 'High activity':
                        i['log_bias_factor'] = "Low Bias"
                except:
                    i['log_bias_factor'] = None
    return reference_lists
def get_reference_assay(self, reference, assay):
    """Return the last reference entry matching *assay*'s signalling
    protein and assay type; empty dict when none match or on error."""
    matched = dict()
    try:
        for candidate in reference:
            if (candidate['signalling_protein'] == assay['signalling_protein']
                    and candidate['assay_type'] == assay['assay_type']):
                matched = candidate
    except:
        self.logger.info('get_reference_assay error')
    return matched
def caclulate_bias_factor_variables(self, a, b, c, d):
    '''
    Combine the four log-transformed terms into a log bias factor,
    (a - b) - (c - d); falls back to 0 if the arithmetic fails.
    '''
    try:
        return (a - b) - (c - d)
    except:
        self.logger.info('caclulate_bias_factor_variables error')
        return 0
def calc_potency_and_transduction(self, biasdata):
    """Annotate each non-top assay with 'potency' and 't_factor' relative
    to the most potent (order_no == 0) assay.  Mutates *biasdata* in place.
    """
    count = 0  # NOTE(review): incremented but never read.
    most_potent = dict()
    for i in biasdata:
        count += 1
        if i['order_no'] == 0:
            most_potent = i
    # T_factor -- bias factor
    for i in biasdata:
        if i['order_no'] > 0:
            try:
                # EC50/IC50: potency is the activity ratio; pEC50/pIC50
                # (already log-scaled): potency is the difference.
                if i['quantitive_measure_type'].lower() == 'ec50' or i['quantitive_measure_type'].lower() == 'ic50':
                    if i['quantitive_activity'] is not None and i['quantitive_activity'] != 0 and most_potent['quantitive_activity'] is not None:
                        i['potency'] = round(
                            i['quantitive_activity'] / most_potent['quantitive_activity'], 1)
                elif i['quantitive_measure_type'].lower() == 'pec50' or i['quantitive_measure_type'].lower() == 'pic50':
                    i['potency'] = round(
                        most_potent['quantitive_activity'] - i['quantitive_activity'], 1)
            except:
                i['potency'] = None
            # Transduction coefficient difference vs the most potent assay.
            if i['t_coefficient'] is not None and most_potent['t_coefficient'] is not None:
                i['t_factor'] = round(
                    most_potent['t_coefficient'] - i['t_coefficient'], 1)
            else:
                i['t_factor'] = None
                self.logger.info('t_factor error')
def validate_lbf(self, i):
    """Ensure no assay carries a negative numeric log bias factor.

    When one is found, its rank is swapped with the current order_no 0
    assay, the factors are recomputed and validation re-runs recursively.

    NOTE(review): the recursive call's return value is discarded, and the
    method returns on the first entry that is not a float (so later
    entries are not examined) -- confirm this is the intended flow.
    """
    for x in i[1]['biasdata']:
        if isinstance(x['log_bias_factor'], float):
            if x['log_bias_factor'] < 0.0:
                # Promote the offending assay to rank 0 and recompute.
                j = next((item for item in i[1]['biasdata'] if item["order_no"] == 0), None)
                x['order_no'], j['order_no'] = j['order_no'], x['order_no']
                self.calc_bias_factor(i[1]['biasdata'], i[1]['reference_assays_list'], i[1]['assay'])
                self.validate_lbf(i)
        else:
            self.logger.info('validate_lbf error')
            return i[1]['biasdata']
    return i[1]['biasdata']
def save_data_to_model(self, context, source):
    """Persist analyzed experiments to the database.

    For each experiment in context['data'] not already stored for
    *source*: resolves the receptor's primary/secondary transducers and,
    when more than one ranked assay exists, saves an AnalyzedExperiment
    plus one AnalyzedAssay per biasdata entry and per matched reference
    assay (the latter tagged assay_description='reference_assay').
    """
    for i in context['data'].items():
        if self.fetch_experiment(i[1]['publication'], i[1]['ligand'], i[1]['receptor'], source) == False:
            primary, secondary = self.fetch_receptor_transducers(
                i[1]['receptor'])
            # Only experiments with at least two ranked assays are worth
            # saving (bias needs a comparison between pathways).
            if len(i[1]['biasdata']) > 1:
                experiment_entry = AnalyzedExperiment(publication=i[1]['publication'],
                                                     ligand=i[1]['ligand'],
                                                     receptor=i[1]['receptor'],
                                                     source=source,
                                                     endogenous_ligand=i[1]['endogenous_ligand'],
                                                     vendor_quantity=i[1]['vendor_counter'],
                                                     reference_ligand=i[1]['ref_ligand_experiment'],
                                                     primary=primary,
                                                     secondary=secondary,
                                                     article_quantity=i[1]['article_quantity'],
                                                     labs_quantity=i[1]['labs'],
                                                     ligand_source_id = i[1]['ligand_source_id'],
                                                     ligand_source_type = i[1]['ligand_source_type']
                                                     )
                experiment_entry.save()
                # Ranked (tested) assays.
                for ex in i[1]['biasdata']:
                    emax_ligand = ex['emax_reference_ligand']
                    experiment_assay = AnalyzedAssay(experiment=experiment_entry,
                                                     family=ex['family'],
                                                     order_no=ex['order_no'],
                                                     signalling_protein=ex['signalling_protein'],
                                                     cell_line=ex['cell_line'],
                                                     assay_type=ex['assay_type'],
                                                     assay_measure=ex['assay_measure_method'],
                                                     assay_time_resolved=ex['assay_time_resolved'],
                                                     ligand_function=ex['ligand_function'],
                                                     quantitive_measure_type=ex['quantitive_measure_type'],
                                                     quantitive_activity=ex['quantitive_activity'],
                                                     quantitive_activity_initial=ex['quantitive_activity_initial'],
                                                     quantitive_unit=ex['quantitive_unit'],
                                                     qualitative_activity=ex['qualitative_activity'],
                                                     quantitive_efficacy=ex['quantitive_efficacy'],
                                                     efficacy_measure_type=ex['efficacy_measure_type'],
                                                     efficacy_unit=ex['efficacy_unit'],
                                                     potency=ex['potency'],
                                                     t_coefficient=ex['t_coefficient'],
                                                     t_value=ex['t_coefficient_initial'],
                                                     t_factor=ex['t_factor'],
                                                     log_bias_factor=ex['log_bias_factor'],
                                                     emax_ligand_reference=emax_ligand
                                                     )
                    experiment_assay.save()
                # Matched reference assays; best-effort -- incomplete
                # reference entries are skipped silently.
                for ex in i[1]['reference_lists']:
                    try:
                        emax_ligand = ex['emax_reference_ligand']
                        experiment_assay = AnalyzedAssay(experiment=experiment_entry,
                                                         assay_description='reference_assay',
                                                         family=ex['family'],
                                                         order_no=ex['order_no'],
                                                         signalling_protein=ex['signalling_protein'],
                                                         cell_line=ex['cell_line'],
                                                         assay_type=ex['assay_type'],
                                                         assay_measure=ex['assay_measure_method'],
                                                         assay_time_resolved=ex['assay_time_resolved'],
                                                         ligand_function=ex['ligand_function'],
                                                         quantitive_measure_type=ex['quantitive_measure_type'],
                                                         quantitive_activity=ex['quantitive_activity'],
                                                         quantitive_activity_initial=ex['quantitive_activity_initial'],
                                                         quantitive_unit=ex['quantitive_unit'],
                                                         qualitative_activity=ex['qualitative_activity'],
                                                         quantitive_efficacy=ex['quantitive_efficacy'],
                                                         efficacy_measure_type=ex['efficacy_measure_type'],
                                                         efficacy_unit=ex['efficacy_unit'],
                                                         potency=ex['potency'],
                                                         t_coefficient=ex['t_coefficient'],
                                                         t_value=ex['t_coefficient_initial'],
                                                         t_factor=ex['t_factor'],
                                                         log_bias_factor=ex['log_bias_factor'],
                                                         emax_ligand_reference=emax_ligand
                                                         )
                        experiment_assay.save()
                    except:
                        pass
            else:
                pass
        else:
            self.logger.info('saving error')
def fetch_experiment(self, publication, ligand, receptor, source):
    '''
    Return True when an AnalyzedExperiment already exists for the given
    publication/ligand/receptor/source combination, False otherwise.

    Uses QuerySet.exists() instead of filter().get(): the previous
    implementation raised MultipleObjectsReturned (and so reported False)
    when several matching experiments existed, and its docstring was a
    copy-paste from an unrelated receptor lookup.
    '''
    try:
        return AnalyzedExperiment.objects.filter(
            publication=publication, ligand=ligand,
            receptor=receptor, source=source).exists()
    except Exception:
        # Keep the original best-effort contract: any lookup failure is
        # treated as "not stored yet".
        self.logger.info('fetch_experiment error')
        return False
def fetch_receptor_transducers(self, receptor):
    """Return (primary, secondary) G-protein coupling names for *receptor*
    as comma-joined strings (each item followed by ', '), or (None, None)
    when the lookup fails.
    """
    primary = set()
    temp = str()
    temp1 = str()
    secondary = set()
    try:
        gprotein = ProteinCouplings.objects.filter(protein=receptor)
        for x in gprotein:
            if x.transduction and x.transduction == 'primary':
                primary.add(x.g_protein.name)
            elif x.transduction and x.transduction == 'secondary':
                secondary.add(x.g_protein.name)
        # NOTE(review): primary strips ' family' (with leading space) while
        # secondary strips 'family' -- confirm the inconsistency is wanted.
        for i in primary:
            temp += str(i.replace(' family', '')) + str(', ')
        for i in secondary:
            temp1 += str(i.replace('family', '')) + str(', ')
        return temp, temp1
    except:
        self.logger.info('receptor not found error')
        return None, None
def process_signalling_proteins(self, context):
    """Collapse each experiment's 'assay_list' to at most one assay per
    family via limit_family_set(); mutates and returns *context*."""
    for _, experiment in context.items():
        experiment['assay_list'] = self.limit_family_set(experiment['assay_list'])
    self.logger.info('process_signalling_proteins')
    return context
def process_signalling_proteins_subs(self, context):
    """Collapse each experiment's 'assay_list' to at most one assay per
    signalling protein via limit_family_set_subs(); mutates and returns
    *context*."""
    for _, experiment in context.items():
        experiment['assay_list'] = self.limit_family_set_subs(experiment['assay_list'])
    self.logger.info('process_signalling_proteins_subs')
    return context
def build_bias_data(self):
    """Run the full bias-analysis pipeline across different G-protein
    families and save results under source 'different_family'."""
    print('Build bias data gproteins')
    context = dict()
    content = self.get_from_model()
    print('stage # 1: Getting data finished, data points: ', len(content))
    content_with_children = self.process_data(content)
    print('stage # 2: Processing children in queryset finished', len(content_with_children))
    changed_data = self.queryset_to_dict(content_with_children)
    print('stage # 3: Converting queryset into dict finished', len(changed_data))
    send = self.combine_unique(changed_data)
    referenced_assay = self.process_referenced_assays(send)
    print('stage # 4: Separating reference assays is finished', len(referenced_assay))
    ligand_data = self.separate_ligands(referenced_assay)
    # One assay per family (cross-family comparison).
    limit_family = self.process_signalling_proteins(ligand_data)
    print('stage # 5: Separate ligands', len(limit_family))
    calculated_assay = self.process_calculation(limit_family)
    print('stage # 6: Calucating finished')
    self.count_publications(calculated_assay)
    print('stage # 7: labs and publications counted')
    context.update({'data': calculated_assay})
    print('stage # 8: combining data into common dict is finished')
    # save dataset to model
    self.save_data_to_model(context, 'different_family')
    print('stage # 9: saving data to model is finished')
def build_bias_data_subtypes(self):
    """Run the bias-analysis pipeline at G-protein subtype granularity and
    save results under source 'same_family'."""
    print('Build bias data gproteins')
    context = dict()
    content = self.get_from_model()
    print('stage # 1 : Getting data finished, data points: ', len(content))
    content_with_children = self.process_data(content)
    print('stage # 2: Processing children in queryset finished', len(content_with_children))
    changed_data = self.queryset_to_dict(content_with_children)
    print('stage # 3: Converting queryset into dict finished', len(changed_data))
    send = self.combine_unique(changed_data)
    referenced_assay = self.process_referenced_assays(send)
    print('stage # 4: Separating reference assays is finished', len(referenced_assay))
    ligand_data = self.separate_ligands(referenced_assay)
    # One assay per signalling protein (within-family comparison).
    limit_family = self.process_signalling_proteins_subs(ligand_data)
    print('stage # 5: Separate ligands')
    calculated_assay = self.process_calculation(limit_family)
    print('stage # 6: Merging assays with same ligand/receptor/publication is finished')
    self.count_publications(calculated_assay)
    print('stage # 7: labs and publications counted')
    context.update({'data': calculated_assay})
    print('stage # 8: combining data into common dict is finished')
    # save dataset to model
    self.save_data_to_model(context, 'same_family')
    print('stage # 9: saving data to model is finished')
def count_publications(self, context):
    """Count collaborating labs and supporting articles per experiment.

    'labs' counts publications sharing at least one author with this
    experiment's publication (including itself).  'article_quantity' is
    filled from a tally keyed by reference ligand / ligand / receptor.

    NOTE(review): this is O(n^2) over experiments, and the bias/t-factor
    condition below mixes 'and'/'or' without parentheses -- confirm the
    intended operator precedence.
    """
    temp = dict()
    for i in context.items():
        labs = list()
        i[1]['labs'] = 0
        labs.append(i[1]['publication'])
        lab_counter = 1
        for j in context.items():
            if j[1]['publication'] not in labs:
                if set(i[1]['authors']) & set(j[1]['authors']):
                    lab_counter += 1
                    labs.append(j[1]['publication'])
                    i[1]['labs'] = lab_counter
        temp_obj = 1
        name = str(i[1]['ref_ligand_experiment']) + \
            '/' + str(i[1]['ligand']) + '/' + str(i[1]['receptor'])
        if name in temp:
            for assays in i[1]['biasdata']:
                if assays['order_no'] > 0:
                    if assays['log_bias_factor'] != None and assays['log_bias_factor'] != '' or assays['t_factor'] != None and assays['t_factor'] != '':
                        temp_obj = temp[name] + 1
        temp[name] = temp_obj
    for i in context.items():
        temp_obj = 0
        name = str(i[1]['ref_ligand_experiment']) + \
            '/' + str(i[1]['ligand']) + '/' + str(i[1]['receptor'])
        if name in temp:
            i[1]['article_quantity'] = temp[name]
    self.logger.info('count_publications')
| |
import re
from django.conf import settings
from rest_framework import exceptions, serializers
from olympia import amo
from olympia.accounts.serializers import (
BaseUserSerializer, UserProfileBasketSyncSerializer)
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.api.fields import (
ESTranslationSerializerField, ReverseChoiceField,
TranslationSerializerField)
from olympia.api.serializers import BaseESSerializer
from olympia.api.utils import is_gate_active
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import Collection
from olympia.constants.applications import APPS_ALL
from olympia.constants.base import ADDON_TYPE_CHOICES_API
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.files.models import File
from olympia.search.filters import AddonAppVersionQueryParam
from olympia.users.models import UserProfile
from olympia.versions.models import (
ApplicationsVersions, License, Version, VersionPreview)
from .models import Addon, Preview, ReplacementAddon, attach_tags
class FileSerializer(serializers.ModelSerializer):
    """Serializes a File (an individual uploaded package) for the API."""
    url = serializers.SerializerMethodField()
    platform = ReverseChoiceField(
        choices=list(amo.PLATFORM_CHOICES_API.items()))
    status = ReverseChoiceField(choices=list(amo.STATUS_CHOICES_API.items()))
    permissions = serializers.ListField(
        source='webext_permissions_list',
        child=serializers.CharField())
    is_restart_required = serializers.BooleanField()

    class Meta:
        model = File
        fields = ('id', 'created', 'hash', 'is_restart_required',
                  'is_webextension', 'is_mozilla_signed_extension',
                  'platform', 'size', 'status', 'url', 'permissions')

    def get_url(self, obj):
        """Absolute URL for the file; src is passed empty."""
        return obj.get_absolute_url(src='')
class PreviewSerializer(serializers.ModelSerializer):
    """Serializes a screenshot/preview with absolute image URLs."""
    caption = TranslationSerializerField()
    image_url = serializers.SerializerMethodField()
    thumbnail_url = serializers.SerializerMethodField()

    class Meta:
        # Note: this serializer can also be used for VersionPreview.
        model = Preview
        fields = ('id', 'caption', 'image_size', 'image_url', 'thumbnail_size',
                  'thumbnail_url')

    def get_image_url(self, obj):
        return absolutify(obj.image_url)

    def get_thumbnail_url(self, obj):
        return absolutify(obj.thumbnail_url)
class ESPreviewSerializer(BaseESSerializer, PreviewSerializer):
    """Preview serializer fed from Elasticsearch data (see comment below)."""
    # Because we have translated fields and dates coming from ES, we can't use
    # a regular PreviewSerializer to handle previews for ESAddonSerializer.
    # Unfortunately we also need to get the class right (it can be either
    # Preview or VersionPreview) so fake_object() implementation in this class
    # does nothing, the instance has already been created by a parent
    # serializer.
    datetime_fields = ('modified',)
    translated_fields = ('caption',)

    def fake_object(self, data):
        # Instance creation is handled by the parent serializer.
        return data
class LicenseSerializer(serializers.ModelSerializer):
    """Serializes a version License, resolving built-in license names from
    their constants and falling back to the database translation."""
    is_custom = serializers.SerializerMethodField()
    name = serializers.SerializerMethodField()
    text = TranslationSerializerField()
    url = serializers.SerializerMethodField()

    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'text', 'url')

    def __init__(self, *args, **kwargs):
        super(LicenseSerializer, self).__init__(*args, **kwargs)
        # Manually bound translation field used as the database fallback in
        # get_name() (the public 'name' is a SerializerMethodField).
        self.db_name = TranslationSerializerField()
        self.db_name.bind('name', self)

    def get_is_custom(self, obj):
        """A license is custom when it is not one of the built-ins."""
        return not bool(obj.builtin)

    def get_url(self, obj):
        return obj.url or self.get_version_license_url(obj)

    def get_version_license_url(self, obj):
        # We need the version associated with the license, because that's where
        # the license_url() method lives. The problem is, normally we would not
        # be able to do that, because there can be multiple versions for a
        # given License. However, since we're serializing through a nested
        # serializer, we cheat and use `instance.version_instance` which is
        # set by SimpleVersionSerializer.to_representation() while serializing.
        # Only get the version license url for non-builtin licenses.
        if not obj.builtin and hasattr(obj, 'version_instance'):
            return absolutify(obj.version_instance.license_url())
        return None

    def get_name(self, obj):
        # See if there is a license constant
        license_constant = obj._constant
        if not license_constant:
            # If not fall back on the name in the database.
            return self.db_name.get_attribute(obj)
        else:
            request = self.context.get('request', None)
            if request and request.method == 'GET' and 'lang' in request.GET:
                # A single lang requested so return a flat string
                return str(license_constant.name)
            else:
                # Otherwise mock the dict with the default lang.
                lang = getattr(request, 'LANG', None) or settings.LANGUAGE_CODE
                return {lang: str(license_constant.name)}

    def to_representation(self, instance):
        data = super(LicenseSerializer, self).to_representation(instance)
        request = self.context.get('request', None)
        # Hide 'is_custom' from clients behind this API deprecation gate.
        if request and is_gate_active(
                request, 'del-version-license-is-custom'):
            data.pop('is_custom', None)
        return data
class CompactLicenseSerializer(LicenseSerializer):
    """LicenseSerializer without the (potentially huge) license text."""
    class Meta:
        model = License
        fields = ('id', 'is_custom', 'name', 'url')
class MinimalVersionSerializer(serializers.ModelSerializer):
    """Smallest useful Version representation: id, files, reviewed date,
    version string."""
    files = FileSerializer(source='all_files', many=True)

    class Meta:
        model = Version
        fields = ('id', 'files', 'reviewed', 'version')
class SimpleVersionSerializer(MinimalVersionSerializer):
    """Version serializer with compatibility and license information but
    without the channel field (used for versions nested in an add-on)."""
    compatibility = serializers.SerializerMethodField()
    edit_url = serializers.SerializerMethodField()
    is_strict_compatibility_enabled = serializers.SerializerMethodField()
    license = CompactLicenseSerializer()
    release_notes = TranslationSerializerField()

    class Meta:
        model = Version
        fields = ('id', 'compatibility', 'edit_url', 'files',
                  'is_strict_compatibility_enabled', 'license',
                  'release_notes', 'reviewed', 'version')

    def to_representation(self, instance):
        # Help the LicenseSerializer find the version we're currently
        # serializing.
        if 'license' in self.fields and instance.license:
            instance.license.version_instance = instance
        return super(SimpleVersionSerializer, self).to_representation(instance)

    def get_compatibility(self, obj):
        """Map app short name -> {'min', 'max'}; apps without explicit
        compat info fall back to D2C minimums / the fake max version."""
        return {
            app.short: {
                'min': compat.min.version if compat else (
                    amo.D2C_MIN_VERSIONS.get(app.id, '1.0')),
                'max': compat.max.version if compat else amo.FAKE_MAX_VERSION
            } for app, compat in obj.compatible_apps.items()
        }

    def get_edit_url(self, obj):
        return absolutify(obj.addon.get_dev_url(
            'versions.edit', args=[obj.pk], prefix_only=True))

    def get_is_strict_compatibility_enabled(self, obj):
        # Strict compatibility is on if any file of the version enables it.
        return any(file_.strict_compatibility for file_ in obj.all_files)
class VersionSerializer(SimpleVersionSerializer):
    """Full Version serializer: adds the release channel and the complete
    license (including text)."""
    channel = ReverseChoiceField(
        choices=list(amo.CHANNEL_CHOICES_API.items()))
    license = LicenseSerializer()

    class Meta:
        model = Version
        fields = ('id', 'channel', 'compatibility', 'edit_url', 'files',
                  'is_strict_compatibility_enabled', 'license',
                  'release_notes', 'reviewed', 'version')
class CurrentVersionSerializer(SimpleVersionSerializer):
    """Serializer for an add-on's current version; for language packs it can
    substitute the latest version compatible with a requested appversion."""

    def to_representation(self, obj):
        # If the add-on is a langpack, and `appversion` is passed, try to
        # determine the latest public compatible version and replace the obj
        # with the result. Because of the perf impact, only done for langpacks
        # in the detail API.
        request = self.context.get('request')
        view = self.context.get('view')
        addon = obj.addon
        if (request and request.GET.get('appversion') and
                getattr(view, 'action', None) == 'retrieve' and
                addon.type == amo.ADDON_LPAPP):
            obj = self.get_current_compatible_version(addon)
        return super(CurrentVersionSerializer, self).to_representation(obj)

    def get_current_compatible_version(self, addon):
        """
        Return latest public version compatible with the app & appversion
        passed through the request, or fall back to addon.current_version if
        none is found.

        Only use on langpacks if the appversion parameter is present.
        """
        request = self.context.get('request')
        try:
            # AddonAppVersionQueryParam.get_values() returns (app_id, min, max)
            # but we want {'min': min, 'max': max}.
            value = AddonAppVersionQueryParam(request).get_values()
            application = value[0]
            appversions = dict(zip(('min', 'max'), value[1:]))
        except ValueError as exc:
            raise exceptions.ParseError(str(exc))
        version_qs = Version.objects.latest_public_compatible_with(
            application, appversions).filter(addon=addon)
        return version_qs.first() or addon.current_version
class ESCompactLicenseSerializer(BaseESSerializer, CompactLicenseSerializer):
    """Compact license serializer fed from Elasticsearch data."""
    translated_fields = ('name', )

    def __init__(self, *args, **kwargs):
        super(ESCompactLicenseSerializer, self).__init__(*args, **kwargs)
        # ES variant of the manually bound db_name fallback field.
        self.db_name = ESTranslationSerializerField()
        self.db_name.bind('name', self)

    def fake_object(self, data):
        # We just pass the data as the fake object will have been created
        # before by ESAddonSerializer.fake_version_object()
        return data
class ESCurrentVersionSerializer(BaseESSerializer, CurrentVersionSerializer):
    """Current-version serializer fed from Elasticsearch data."""
    license = ESCompactLicenseSerializer()
    datetime_fields = ('reviewed',)
    translated_fields = ('release_notes',)

    def fake_object(self, data):
        # We just pass the data as the fake object will have been created
        # before by ESAddonSerializer.fake_version_object()
        return data
class AddonEulaPolicySerializer(serializers.ModelSerializer):
    """Exposes only an add-on's EULA and privacy policy texts."""
    eula = TranslationSerializerField()
    privacy_policy = TranslationSerializerField()

    class Meta:
        model = Addon
        fields = (
            'eula',
            'privacy_policy',
        )
class AddonDeveloperSerializer(BaseUserSerializer):
    """User serializer for add-on authors, adding the avatar URL."""
    picture_url = serializers.SerializerMethodField()

    class Meta(BaseUserSerializer.Meta):
        fields = BaseUserSerializer.Meta.fields + (
            'picture_url',)
        read_only_fields = fields
class AddonSerializer(serializers.ModelSerializer):
    """Main API serializer for Addon objects (detail/list endpoints)."""
    authors = AddonDeveloperSerializer(many=True, source='listed_authors')
    categories = serializers.SerializerMethodField()
    contributions_url = serializers.URLField(source='contributions')
    current_version = CurrentVersionSerializer()
    description = TranslationSerializerField()
    developer_comments = TranslationSerializerField()
    edit_url = serializers.SerializerMethodField()
    has_eula = serializers.SerializerMethodField()
    has_privacy_policy = serializers.SerializerMethodField()
    homepage = TranslationSerializerField()
    icon_url = serializers.SerializerMethodField()
    icons = serializers.SerializerMethodField()
    is_source_public = serializers.BooleanField(source='view_source')
    is_featured = serializers.SerializerMethodField()
    name = TranslationSerializerField()
    previews = PreviewSerializer(many=True, source='current_previews')
    ratings = serializers.SerializerMethodField()
    ratings_url = serializers.SerializerMethodField()
    review_url = serializers.SerializerMethodField()
    status = ReverseChoiceField(choices=list(amo.STATUS_CHOICES_API.items()))
    summary = TranslationSerializerField()
    support_email = TranslationSerializerField()
    support_url = TranslationSerializerField()
    tags = serializers.SerializerMethodField()
    type = ReverseChoiceField(choices=list(amo.ADDON_TYPE_CHOICES_API.items()))
    url = serializers.SerializerMethodField()

    class Meta:
        model = Addon
        fields = (
            'id',
            'authors',
            'average_daily_users',
            'categories',
            'contributions_url',
            'created',
            'current_version',
            'default_locale',
            'description',
            'developer_comments',
            'edit_url',
            'guid',
            'has_eula',
            'has_privacy_policy',
            'homepage',
            'icon_url',
            'icons',
            'is_disabled',
            'is_experimental',
            'is_featured',
            'is_recommended',
            'is_source_public',
            'last_updated',
            'name',
            'previews',
            'public_stats',
            'ratings',
            'ratings_url',
            'requires_payment',
            'review_url',
            'slug',
            'status',
            'summary',
            'support_email',
            'support_url',
            'tags',
            'type',
            'url',
            'weekly_downloads'
        )

    def to_representation(self, obj):
        data = super(AddonSerializer, self).to_representation(obj)
        request = self.context.get('request', None)
        # Optionally wrap outbound URLs through the outgoing-link redirector.
        if ('request' in self.context and
                'wrap_outgoing_links' in self.context['request'].GET):
            for key in ('homepage', 'support_url', 'contributions_url'):
                if key in data:
                    data[key] = self.outgoingify(data[key])
        # 'created' is hidden behind this API deprecation gate.
        if request and is_gate_active(request, 'del-addons-created-field'):
            data.pop('created', None)
        return data

    def outgoingify(self, data):
        """Wrap a URL (flat string or per-locale dict) with the outgoing
        redirector; empty values are passed through untouched."""
        if data:
            if isinstance(data, str):
                return get_outgoing_url(data)
            elif isinstance(data, dict):
                return {key: get_outgoing_url(value) if value else None
                        for key, value in data.items()}
        # None or empty string... don't bother.
        return data

    def get_categories(self, obj):
        """Map app short name -> list of category slugs."""
        return {
            app_short_name: [cat.slug for cat in categories]
            for app_short_name, categories in obj.app_categories.items()
        }

    def get_has_eula(self, obj):
        return bool(getattr(obj, 'has_eula', obj.eula))

    def get_is_featured(self, obj):
        # obj._is_featured is set from ES, so will only be present for list
        # requests.
        if not hasattr(obj, '_is_featured'):
            # Any featuring will do.
            obj._is_featured = obj.is_featured(app=None, lang=None)
        return obj._is_featured

    def get_has_privacy_policy(self, obj):
        return bool(getattr(obj, 'has_privacy_policy', obj.privacy_policy))

    def get_tags(self, obj):
        if not hasattr(obj, 'tag_list'):
            attach_tags([obj])
        # attach_tags() might not have attached anything to the addon, if it
        # had no tags.
        return getattr(obj, 'tag_list', [])

    def get_url(self, obj):
        # Use absolutify(get_detail_url()), get_absolute_url() calls
        # get_url_path() which does an extra check on current_version that is
        # annoying in subclasses which don't want to load that version.
        return absolutify(obj.get_detail_url())

    def get_edit_url(self, obj):
        return absolutify(obj.get_dev_url())

    def get_ratings_url(self, obj):
        return absolutify(obj.ratings_url)

    def get_review_url(self, obj):
        return absolutify(reverse('reviewers.review', args=[obj.pk]))

    def get_icon_url(self, obj):
        return absolutify(obj.get_icon_url(64))

    def get_icons(self, obj):
        """Map icon size (as string) -> absolute icon URL."""
        get_icon = obj.get_icon_url
        return {str(size): absolutify(get_icon(size))
                for size in amo.ADDON_ICON_SIZES}

    def get_ratings(self, obj):
        return {
            'average': obj.average_rating,
            'bayesian_average': obj.bayesian_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
class AddonSerializerWithUnlistedData(AddonSerializer):
    """AddonSerializer that additionally exposes the latest unlisted
    version (for privileged clients)."""
    latest_unlisted_version = SimpleVersionSerializer()

    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('latest_unlisted_version',)
class SimpleAddonSerializer(AddonSerializer):
    """Minimal add-on representation: id, slug, name and icon."""
    class Meta:
        model = Addon
        fields = ('id', 'slug', 'name', 'icon_url')
class ESAddonSerializer(BaseESSerializer, AddonSerializer):
    """AddonSerializer working from an ElasticSearch document.

    Fake model instances are rebuilt from the indexed data (see
    fake_object() below) so the regular serializer fields can operate on
    them without hitting the database.
    """

    # Override various fields for related objects for which we don't want to
    # expose data the same way the regular serializer does (usually because
    # some of the data is not indexed in ES).
    authors = BaseUserSerializer(many=True, source='listed_authors')
    current_version = ESCurrentVersionSerializer()
    previews = ESPreviewSerializer(many=True, source='current_previews')
    _score = serializers.SerializerMethodField()

    # Fields handled generically by BaseESSerializer machinery.
    datetime_fields = ('created', 'last_updated', 'modified')
    translated_fields = ('name', 'description', 'developer_comments',
                         'homepage', 'summary', 'support_email', 'support_url')

    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('_score', )

    def fake_preview_object(self, obj, data, model_class=Preview):
        """Build a fake preview instance of *model_class* from ES data.

        This is what ESPreviewSerializer.fake_object() would do, but we do
        it here and make that fake_object() method a no-op in order to have
        access to the right model_class to use - VersionPreview for static
        themes, Preview for the rest.
        """
        preview = model_class(id=data['id'], sizes=data.get('sizes', {}))
        preview.addon = obj
        preview.version = obj.current_version
        preview_serializer = self.fields['previews'].child
        # Attach base attributes that have the same name/format in ES and in
        # the model.
        preview_serializer._attach_fields(preview, data, ('modified',))
        # Attach translations.
        preview_serializer._attach_translations(
            preview, data, preview_serializer.translated_fields)
        return preview

    def fake_file_object(self, obj, data):
        """Build a fake File attached to version *obj* from ES data."""
        file_ = File(
            id=data['id'], created=self.handle_date(data['created']),
            hash=data['hash'], filename=data['filename'],
            is_webextension=data.get('is_webextension'),
            is_mozilla_signed_extension=data.get(
                'is_mozilla_signed_extension'),
            is_restart_required=data.get('is_restart_required', False),
            platform=data['platform'], size=data['size'],
            status=data['status'],
            strict_compatibility=data.get('strict_compatibility', False),
            version=obj)
        file_.webext_permissions_list = data.get('webext_permissions_list', [])
        return file_

    def fake_version_object(self, obj, data, channel):
        """Build a fake Version (with files, compat info and license) from ES
        data, or return None when *data* is empty.
        """
        if data:
            version = Version(
                addon=obj, id=data['id'],
                reviewed=self.handle_date(data['reviewed']),
                version=data['version'], channel=channel)
            version.all_files = [
                self.fake_file_object(version, file_data)
                for file_data in data.get('files', [])
            ]

            # In ES we store integers for the appversion info, we need to
            # convert it back to strings.
            compatible_apps = {}
            for app_id, compat_dict in data.get('compatible_apps', {}).items():
                app_name = APPS_ALL[int(app_id)]
                compatible_apps[app_name] = ApplicationsVersions(
                    min=AppVersion(version=compat_dict.get('min_human', '')),
                    max=AppVersion(version=compat_dict.get('max_human', '')))
            version._compatible_apps = compatible_apps
            version_serializer = self.fields['current_version']
            version_serializer._attach_translations(
                version, data, version_serializer.translated_fields)
            if 'license' in data:
                license_serializer = version_serializer.fields['license']
                version.license = License(id=data['license']['id'])
                license_serializer._attach_fields(
                    version.license, data['license'], ('builtin', 'url'))
                # Can't use license_serializer._attach_translations() directly
                # because 'name' is a SerializerMethodField, not an
                # ESTranslatedField.
                license_serializer.db_name.attach_translations(
                    version.license, data['license'], 'name')
            else:
                version.license = None
        else:
            version = None
        return version

    def fake_object(self, data):
        """Create a fake instance of Addon and related models from ES data."""
        obj = Addon(id=data['id'], slug=data['slug'])

        # Attach base attributes that have the same name/format in ES and in
        # the model.
        self._attach_fields(
            obj, data, (
                'average_daily_users',
                'bayesian_rating',
                'contributions',
                'created',
                'default_locale',
                'guid',
                'has_eula',
                'has_privacy_policy',
                'hotness',
                'icon_hash',
                'icon_type',
                'is_experimental',
                'is_recommended',
                'last_updated',
                'modified',
                'public_stats',
                'requires_payment',
                'slug',
                'status',
                'type',
                'view_source',
                'weekly_downloads'
            )
        )

        # Attach attributes that do not have the same name/format in ES.
        obj.tag_list = data.get('tags', [])
        obj.all_categories = [
            CATEGORIES_BY_ID[cat_id] for cat_id in data.get('category', [])]

        # Not entirely accurate, but enough in the context of the search API.
        obj.disabled_by_user = data.get('is_disabled', False)

        # Attach translations (they require special treatment).
        self._attach_translations(obj, data, self.translated_fields)

        # Attach related models (also faking them). `current_version` is a
        # property we can't write to, so we use the underlying field which
        # begins with an underscore.
        data_version = data.get('current_version') or {}
        obj._current_version = self.fake_version_object(
            obj, data_version, amo.RELEASE_CHANNEL_LISTED)
        obj._current_version_id = data_version.get('id')

        data_authors = data.get('listed_authors', [])
        obj.listed_authors = [
            UserProfile(
                id=data_author['id'], display_name=data_author['name'],
                username=data_author['username'],
                is_public=data_author.get('is_public', False))
            for data_author in data_authors
        ]

        # Static themes use VersionPreview objects; everything else Preview.
        is_static_theme = data.get('type') == amo.ADDON_STATICTHEME
        preview_model_class = VersionPreview if is_static_theme else Preview
        obj.current_previews = [
            self.fake_preview_object(
                obj, preview_data, model_class=preview_model_class)
            for preview_data in data.get('previews', [])
        ]

        ratings = data.get('ratings', {})
        obj.average_rating = ratings.get('average')
        obj.total_ratings = ratings.get('count')
        obj.text_ratings_count = ratings.get('text_count')

        obj._is_featured = data.get('is_featured', False)

        return obj

    def get__score(self, obj):
        # es_meta is added by BaseESSerializer.to_representation() before DRF's
        # to_representation() is called, so it's present on all objects.
        return obj._es_meta['score']

    def to_representation(self, obj):
        """Drop `_score` from the output unless the API gate enabling it is
        active for this request.
        """
        data = super(ESAddonSerializer, self).to_representation(obj)
        request = self.context.get('request')
        if request and '_score' in data and not is_gate_active(
                request, 'addons-search-_score-field'):
            data.pop('_score')
        return data
class ESAddonAutoCompleteSerializer(ESAddonSerializer):
    """Trimmed-down ES serializer used for autocomplete search results."""

    class Meta(ESAddonSerializer.Meta):
        fields = ('id', 'icon_url', 'is_recommended', 'name', 'type', 'url')
        model = Addon

    def get_url(self, obj):
        """Return the absolute URL for the add-on.

        Addon.get_absolute_url() calls get_url_path(), which wants
        _current_version_id to exist, but that's just a safeguard. We don't
        care and don't want to fetch the current version field to improve
        perf, so give it a fake one.
        """
        obj._current_version_id = 1
        return obj.get_absolute_url()
class StaticCategorySerializer(serializers.Serializer):
    """Serializes a `StaticCategory` as found in constants.categories"""

    id = serializers.IntegerField()
    name = serializers.CharField()
    slug = serializers.CharField()
    application = serializers.SerializerMethodField()
    misc = serializers.BooleanField()
    type = serializers.SerializerMethodField()
    weight = serializers.IntegerField()
    description = serializers.CharField()

    def get_application(self, obj):
        # Expose the application as its short name rather than its id.
        return APPS_ALL[obj.application].short

    def get_type(self, obj):
        # Expose the add-on type as its API string identifier.
        return ADDON_TYPE_CHOICES_API[obj.type]
class LanguageToolsSerializer(AddonSerializer):
    """Serializer for language tools (language packs and dictionaries)."""

    target_locale = serializers.CharField()
    current_compatible_version = serializers.SerializerMethodField()

    class Meta:
        model = Addon
        fields = ('id', 'current_compatible_version', 'default_locale', 'guid',
                  'name', 'slug', 'target_locale', 'type', 'url', )

    def get_current_compatible_version(self, obj):
        """Return the serialized compatible version, or None."""
        compatible_versions = getattr(obj, 'compatible_versions', None)
        if compatible_versions is None:
            return None

        serialized = MinimalVersionSerializer(
            compatible_versions, many=True).data
        # 99% of the cases there will only be one result, since most language
        # packs are automatically uploaded for a given app version. If there
        # are more, pick the first. An empty list should not happen - the view
        # queryset only returns add-ons with at least one compatible version -
        # but don't fail too loudly if the unthinkable happens.
        if serialized:
            return serialized[0]
        return None

    def to_representation(self, obj):
        data = super(LanguageToolsSerializer, self).to_representation(obj)
        request = self.context['request']

        # The compatible version is only exposed when the app version query
        # parameter was supplied.
        app_version_requested = (
            AddonAppVersionQueryParam.query_param in request.GET)
        if not app_version_requested and 'current_compatible_version' in data:
            data.pop('current_compatible_version')

        # Shim for clients relying on the removed locale_disambiguation field.
        if request and is_gate_active(
                request, 'addons-locale_disambiguation-shim'):
            data['locale_disambiguation'] = None
        return data
class VersionBasketSerializer(SimpleVersionSerializer):
    """Version serialization limited to the fields basket syncing needs."""

    class Meta:
        model = Version
        fields = ('id', 'compatibility', 'is_strict_compatibility_enabled',
                  'version')
class AddonBasketSyncSerializer(AddonSerializerWithUnlistedData):
    """Read-only serializer used when syncing add-on data to basket."""

    # We want to send all authors to basket, not just listed ones, and have
    # the full basket-specific serialization.
    authors = UserProfileBasketSyncSerializer(many=True)
    name = serializers.SerializerMethodField()
    latest_unlisted_version = VersionBasketSerializer()
    current_version = VersionBasketSerializer()

    class Meta:
        model = Addon
        fields = ('authors', 'average_daily_users', 'categories',
                  'current_version', 'default_locale', 'guid', 'id',
                  'is_disabled', 'is_recommended', 'last_updated',
                  'latest_unlisted_version', 'name', 'ratings', 'slug',
                  'status', 'type')
        read_only_fields = fields

    def get_name(self, obj):
        # Basket doesn't want translations, we run the serialization task under
        # the add-on default locale so we can just return the name as string.
        return str(obj.name)
class ReplacementAddonSerializer(serializers.ModelSerializer):
    """Serializer mapping a legacy add-on GUID to replacement add-on GUIDs."""

    replacement = serializers.SerializerMethodField()
    ADDON_PATH_REGEX = r"""/addon/(?P<addon_id>[^/<>"']+)/$"""
    COLLECTION_PATH_REGEX = (
        r"""/collections/(?P<user_id>[^/<>"']+)/(?P<coll_slug>[^/]+)/$""")

    class Meta:
        model = ReplacementAddon
        fields = ('guid', 'replacement')

    def _get_addon_guid(self, addon_id):
        """Return [guid] for a public add-on matched by id or slug, else []."""
        queryset = Addon.objects.public().id_or_slug(addon_id)
        try:
            found = queryset.get()
        except Addon.DoesNotExist:
            return []
        return [found.guid]

    def _get_collection_guids(self, user_id, collection_slug):
        """Return the guids of all approved add-ons in the given collection."""
        lookup = {'slug': collection_slug, 'listed': True}
        # The user can be referenced either by username or numeric id.
        if isinstance(user_id, str) and not user_id.isdigit():
            lookup['author__username'] = user_id
        else:
            lookup['author'] = user_id
        try:
            collection = Collection.objects.get(**lookup)
        except Collection.DoesNotExist:
            return []
        valid_q = Addon.objects.get_queryset().valid_q([amo.STATUS_APPROVED])
        guids = collection.addons.filter(valid_q).values_list('guid', flat=True)
        return list(guids)

    def get_replacement(self, obj):
        """Resolve the replacement path into a list of add-on guids."""
        if obj.has_external_url():
            # It's an external url so no guids.
            return []

        addon_match = re.search(self.ADDON_PATH_REGEX, obj.path)
        if addon_match is not None:
            return self._get_addon_guid(addon_match.group('addon_id'))

        coll_match = re.search(self.COLLECTION_PATH_REGEX, obj.path)
        if coll_match is not None:
            return self._get_collection_guids(
                coll_match.group('user_id'), coll_match.group('coll_slug'))
        return []
| |
# -*- coding: utf-8 -*-
"""The Manager is a key component of HMDB. This class is used to create, populate and query the local HMDB version."""
import logging
from typing import List, Mapping, Optional
from bel_resources import get_bel_resource
from bio2bel import AbstractManager
from tqdm import tqdm
from .constants import DOID, HP, MESHD, MODULE_NAME, ONTOLOGIES, ONTOLOGY_NAMESPACES
from .models import (
Base, Biofluid, Biofunction, CellularLocation, Disease, Metabolite, MetaboliteBiofluid, MetaboliteCellularLocation,
MetaboliteDiseaseReference, MetabolitePathway, MetaboliteProtein, MetaboliteReference,
MetaboliteSynonym, MetaboliteTissue, Pathway, Protein, Reference, SecondaryAccession, Tissue,
)
from .parser import get_data
__all__ = [
'Manager',
]
log = logging.getLogger(__name__)
class Manager(AbstractManager):
    """Metabolite-proteins and metabolite-disease associations.

    Creates, populates and queries the local HMDB database.
    """

    module_name = MODULE_NAME
    flask_admin_models = [Metabolite, Disease, Protein, Pathway, Biofluid]
    _base = Base

    def is_populated(self) -> bool:
        """Check if the database is already populated."""
        return 0 < self.count_metabolites()

    @staticmethod
    def _get_tag(element_tag) -> str:
        """Delete the XML namespace prefix when calling element.tag.

        :param element_tag: tag attribute of an XML element
        """
        return element_tag.split("}")[1]

    def _populate_with_1_layer_elements(
        self,
        element,
        metabolite_instance,
        instance_dict,
        table,
        relation_table,
        column_name: str,
    ):
        """Parse and populate database with metabolite elements, which themselves have one more layer.

        :param element: the current parent XML element. E.g. "pathways" where the children would have the tag "pathway".
        :param models.Metabolite metabolite_instance: metabolite object which is associated with the instances (e.g. is
            involved in that "pathway")
        :param dict instance_dict: dictionary which tracks if the found instance is already present in the table and can
            then refer to it
        :param class table: sqlalchemy class to which the instances belong. E.g. "Pathways"
        :param class relation_table: sqlalchemy class which stores the many to many relation between the instances and
            the metabolites
        :param column_name: Name of the column in the relation tables which does not represent the metabolite.
            e.g. reference, pathway etc
        :rtype: dict
        """
        for instance_element in element:
            instance_dict_key = instance_element.text
            if instance_dict_key not in instance_dict:  # check if instance is already in table
                new_instance_dict = {column_name: instance_dict_key}
                instance_dict[instance_dict_key] = table(**new_instance_dict)
                self.session.add(instance_dict[instance_dict_key])
            # create metabolite-instance relation object
            new_meta_rel_dict = {"metabolite": metabolite_instance, column_name: instance_dict[instance_dict_key]}
            self.session.add(relation_table(**new_meta_rel_dict))
        return instance_dict

    def _populate_with_2_layer_elements(
        self,
        element,
        metabolite_instance,
        instance_dict,
        table,
        relation_table,
        column,
        instance_dict_key=None,
        metabolite_column='metabolite',
    ):
        """Parse and populate database with metabolite elements, which themselves have two more layers.

        :param element: the current parent XML element. E.g. "pathways" where the children would have the tag "pathway".
        :param models.Metabolite metabolite_instance: metabolite object which is associated with the instances (e.g. is
            involved in that "pathway")
        :param dict instance_dict: dictionary which tracks if the found instance is already present in the table and can
            then refer to it
        :param type table: sqlalchemy class to which the instances belong. E.g. "Pathways"
        :param type relation_table: sqlalchemy class which stores the many to many relation between the instances and
            the metabolites
        :param str column: column name of the relation table which is not the metabolite
        :param str instance_dict_key: String which is used as the key for the instance_dict. (to ensure uniqueness in
            the instance_dict)
        :param str metabolite_column: column of the relation table which represents the foreignkey to the main table.
            In our database model the Metabolite table.
        :rtype: dict
        """
        # Default the uniqueness key to the tag of the first grandchild
        # element (e.g. the "name" tag of the first pathway).
        if instance_dict_key is None and len(element) > 0:
            instance_dict_key = self._get_tag(element[0][0].tag)

        for instance_element in element:
            # build pathway object dict to create pathway object
            instance_object_dict = {}

            # create pathway instance
            for instance_sub_element in instance_element:
                cutted_pathway_tag = self._get_tag(instance_sub_element.tag)
                instance_object_dict[cutted_pathway_tag] = instance_sub_element.text

            # add MetabolitePathway relation and continue with next pathway if pathway already present in Pathways
            if instance_object_dict[instance_dict_key] in instance_dict:
                new_meta_rel_dict = {
                    metabolite_column: metabolite_instance,
                    column: instance_dict[instance_object_dict[instance_dict_key]]
                }
                new_meta_rel = relation_table(**new_meta_rel_dict)
                self.session.add(new_meta_rel)
                continue

            instance_dict[instance_object_dict[instance_dict_key]] = table(**instance_object_dict)
            self.session.add(instance_dict[instance_object_dict[instance_dict_key]])

            new_meta_rel_dict = {
                metabolite_column: metabolite_instance,
                column: instance_dict[instance_object_dict[instance_dict_key]]
            }
            new_meta_rel = relation_table(**new_meta_rel_dict)
            self.session.add(new_meta_rel)
        return instance_dict

    def _populate_diseases(
        self,
        element,
        references_dict,
        diseases_dict,
        metabolite_instance,
        disease_ontologies=None,
        map_dis=True,
    ):
        """Populate the database with disease and related reference information.

        :param element: Element object from the xml ElementTree
        :param dict references_dict: Dictionary to keep track of which references are already in the database
        :param dict diseases_dict: Dictionary to keep track of which diseases are already in the database
        :param models.Metabolite metabolite_instance: Metabolite object to which the diseases and references are related
        :param dict disease_ontologies: Mapping of ontology name to {lowercased disease name: term}, as built by
            :meth:`_disease_ontology_dict`. Required when ``map_dis`` is True.
        :param boolean map_dis: If True the HMDB disease names will be mapped to different ontologies.
        :rtype: dict, dict
        """
        for disease_element in element:
            disease_instance = Disease()

            for disease_sub_element in disease_element:
                dtag = self._get_tag(disease_sub_element.tag)

                if dtag != "references":
                    setattr(disease_instance, dtag, disease_sub_element.text)
                    continue

                if disease_instance.name not in diseases_dict:  # add disease instance if not already in table
                    # map to different disease ontologies if map is True
                    if map_dis:
                        disease_lower = disease_instance.name.lower()  # for case insensitivity
                        for ontology in disease_ontologies:
                            if disease_lower not in disease_ontologies[ontology]:
                                continue
                            v = disease_ontologies[ontology][disease_lower]
                            if ontology == DOID:
                                setattr(disease_instance, 'dion', v)
                            elif ontology == HP:
                                setattr(disease_instance, 'hpo', v)
                            elif ontology == MESHD:
                                setattr(disease_instance, 'mesh_diseases', v)

                    diseases_dict[disease_instance.name] = disease_instance
                    self.session.add(disease_instance)

                for reference_element in disease_sub_element:
                    new_reference_object_dict = {}  # dict to check if reference is already present in table

                    for reference_sub_element in reference_element:  # construct new reference object
                        reference_tag = self._get_tag(reference_sub_element.tag)
                        new_reference_object_dict[reference_tag] = reference_sub_element.text

                    # add if not already in reference table
                    if new_reference_object_dict['reference_text'] not in references_dict:
                        references_dict[new_reference_object_dict['reference_text']] = Reference(
                            **new_reference_object_dict)
                        self.session.add(references_dict[new_reference_object_dict['reference_text']])

                    rel_meta_dis_ref = MetaboliteDiseaseReference(
                        metabolite=metabolite_instance,
                        disease=diseases_dict[disease_instance.name],
                        reference=references_dict[new_reference_object_dict['reference_text']]
                    )
                    self.session.add(rel_meta_dis_ref)
        return references_dict, diseases_dict

    @staticmethod
    def _disease_ontology_dict(ontology: str) -> Mapping[str, str]:
        """Create a dictionary from the disease ontologies used for mapping HMDB disease names to those ontologies."""
        doid_path = ONTOLOGY_NAMESPACES[ontology]
        doid_ns = get_bel_resource(doid_path)
        return {value.lower(): value for value in doid_ns['Values']}

    def populate(self, source: Optional[str] = None, map_dis: bool = True, group_size: int = 500_000):
        """Populate the database with the HMDB data.

        :param source: Path to an .xml file. If None the whole HMDB will be downloaded and used for population.
        :param map_dis: Should diseases be mapped?
        :param group_size: Number of metabolites to process between intermediate commits.
        """
        # construct sets for disease ontologies for mapping hmdb diseases
        if not map_dis:
            disease_ontologies = None
        else:
            disease_ontologies = {
                ontology: self._disease_ontology_dict(ontology)
                for ontology in ONTOLOGIES
            }

        # construct xml tree
        tree = get_data(source)
        root = tree.getroot()

        # dicts to check unique constraints for specific tables
        biofluids_dict = {}
        tissues_dict = {}
        pathways_dict = {}
        proteins_dict = {}
        references_dict = {}
        diseases_dict = {}
        cellular_locations_dict = {}

        # iterate through xml tree
        for i, elements in enumerate(tqdm(root, desc='HMDB Metabolite')):
            # create metabolite dict used to feed in main metabolite table
            metabolite = Metabolite()

            for element in elements:
                # delete namespace prefix
                tag = self._get_tag(element.tag)

                # handle wikipedia typo in xml tags
                if tag == "wikipidia":
                    log.warning("HMDB fixed the 'wikipidia' tag to 'wikipedia'. Change code.")
                    tag = "wikipedia"

                if tag == "secondary_accessions":
                    self.session.add_all([
                        SecondaryAccession(
                            metabolite=metabolite,
                            secondary_accession=secondary_accession_element.text
                        )
                        for secondary_accession_element in element
                    ])
                elif tag == "synonyms":
                    # de-duplicate synonyms before insertion
                    synonyms = {
                        synonym_element.text
                        for synonym_element in element
                    }
                    self.session.add_all([
                        MetaboliteSynonym(
                            metabolite=metabolite,
                            synonym=synonym,
                        )
                        for synonym in synonyms
                    ])
                elif tag == "taxonomy":  # will be delayed to later versions since not important for BEL
                    continue
                elif tag == "ontology":
                    continue
                elif tag == "cellular_locations":
                    cellular_locations_dict = self._populate_with_1_layer_elements(
                        element,
                        metabolite,
                        cellular_locations_dict,
                        CellularLocation,
                        MetaboliteCellularLocation,
                        "cellular_location"
                    )
                elif tag == "experimental_properties":  # will be delayed to later versions since not important for BEL
                    continue
                elif tag == "predicted_properties":  # will be delayed to later versions since not important for BEL
                    continue
                elif tag == "spectra":  # will not be processed since the corresponding database is down
                    continue
                elif tag == "biospecimen_locations":
                    biofluids_dict = self._populate_with_1_layer_elements(
                        element,
                        metabolite,
                        biofluids_dict,
                        Biofluid,
                        MetaboliteBiofluid,
                        'biofluid',
                    )
                elif tag == "tissue_locations":
                    tissues_dict = self._populate_with_1_layer_elements(
                        element,
                        metabolite,
                        tissues_dict,
                        Tissue,
                        MetaboliteTissue,
                        'tissue',
                    )
                elif tag == "pathways":
                    pathways_dict = self._populate_with_2_layer_elements(
                        element,
                        metabolite,
                        pathways_dict,
                        Pathway,
                        MetabolitePathway,
                        'pathway',
                    )
                elif tag == "normal_concentrations":  # will be delayed to later versions since not important for BEL
                    continue
                elif tag == "abnormal_concentrations":  # will be delayed to later versions since not important for BEL
                    continue
                elif tag == "diseases":
                    references_dict, diseases_dict = self._populate_diseases(
                        element,
                        references_dict,
                        diseases_dict,
                        metabolite,
                        disease_ontologies,
                        map_dis=map_dis,
                    )
                elif tag == "general_references":
                    references_dict = self._populate_with_2_layer_elements(
                        element,
                        metabolite,
                        references_dict,
                        Reference,
                        MetaboliteReference,
                        'reference',
                        "reference_text",
                    )
                elif tag == "protein_associations":
                    proteins_dict = self._populate_with_2_layer_elements(
                        element,
                        metabolite,
                        proteins_dict,
                        Protein,
                        MetaboliteProtein,
                        'protein',
                    )
                else:  # feed in main metabolite table
                    setattr(metabolite, tag, element.text)

            self.session.add(metabolite)

            # Commit in batches of ``group_size`` metabolites to bound memory
            # usage. The previous ``if (i + 1) % group_size:`` condition was
            # inverted: it committed on every iteration *except* each
            # group_size-th one, defeating the batching.
            if (i + 1) % group_size == 0:
                log.debug('committing')
                self.session.commit()

        self.session.commit()

    def get_metabolite_by_accession(self, hmdb_metabolite_accession: str) -> Optional[Metabolite]:
        """Query the constructed HMDB database and extract a metabolite object.

        :param hmdb_metabolite_accession: HMDB metabolite identifier

        Example:

        >>> import bio2bel_hmdb
        >>> manager = bio2bel_hmdb.Manager()
        >>> manager.get_metabolite_by_accession("HMDB00072")
        """
        return self.session.query(Metabolite).filter(Metabolite.accession == hmdb_metabolite_accession).one_or_none()

    def query_metabolite_associated_proteins(self, hmdb_metabolite_id: str) -> Optional[List[Protein]]:
        """Query the constructed HMDB database to get the metabolite associated protein relations for BEL enrichment.

        Returns None when the metabolite does not exist.

        :param hmdb_metabolite_id: HMDB metabolite identifier
        """
        metabolite = self.get_metabolite_by_accession(hmdb_metabolite_id)
        if metabolite is not None:
            return metabolite.proteins

    def query_metabolite_associated_diseases(self, hmdb_metabolite_id: str) -> List[Disease]:
        """Query the constructed HMDB database to get the metabolite associated disease relations for BEL enrichment.

        :param hmdb_metabolite_id: HMDB metabolite identifier
        """
        metabolite = self.get_metabolite_by_accession(hmdb_metabolite_id)
        return metabolite.diseases

    def query_disease_associated_metabolites(self, disease_name: str) -> List[Metabolite]:
        """Query function that returns a list of metabolite-disease interactions, which are associated to a disease.

        Note: raises AttributeError when no disease with the given name exists.

        :param disease_name: HMDB disease name
        """
        return self.session.query(Disease).filter(Disease.name == disease_name).one_or_none().metabolites

    def query_protein_associated_metabolites(self, uniprot_id):
        """Query function that returns a list of metabolite-protein interactions, which are associated to a protein.

        Note: raises AttributeError when no protein with the given id exists.

        :param str uniprot_id: uniprot identifier of a protein for which the associated metabolite relations should be
            outputted
        :rtype: list
        """
        return self.session.query(Protein).filter(Protein.uniprot_id == uniprot_id).one_or_none().metabolites

    def get_hmdb_accession(self):
        """Create a list of all HMDB metabolite identifiers present in the database.

        :rtype: list
        """
        accessions = self.session.query(Metabolite.accession).all()
        if not accessions:
            log.warning("Database not populated. Please populate database before calling this function")
        # Flatten the list of single-column result tuples.
        return [accession for accession, in accessions]

    def get_hmdb_diseases(self):
        """Create a list of all disease names present in the database.

        :rtype: list
        """
        names = self.session.query(Disease.name).all()
        if not names:
            log.warning("Database not populated. Please populate database before calling this function")
        return [name for name, in names]

    def _get_models(self, interaction_table):
        """Extract all interactions from the many to many interaction table.

        :param type interaction_table: Relation table from the database model. (e.g. MetaboliteProteins)
        :rtype: query
        """
        return self.session.query(interaction_table).all()

    def get_metabolite_disease_interactions(self) -> List[MetaboliteDiseaseReference]:
        """List all metabolite-disease-reference interactions."""
        return self._get_models(MetaboliteDiseaseReference)

    def get_metabolite_protein_interactions(self) -> List[MetaboliteProtein]:
        """List all metabolite-protein interactions."""
        return self._get_models(MetaboliteProtein)

    def count_diseases(self) -> int:
        """Count the number of diseases in the database."""
        return self.session.query(Disease).count()

    def count_cellular_locations(self):
        """Count the number of cellular locations in the database."""
        return self.session.query(CellularLocation).count()

    def count_references(self):
        """Count the number of literature references in the database."""
        return self.session.query(Reference).count()

    def get_reference_by_pubmed_id(self, pubmed_id: str) -> Optional[Reference]:
        """Get a reference by its PubMed identifier if it exists.

        :param pubmed_id: The PubMed identifier to search
        """
        return self.session.query(Reference).filter(Reference.pubmed_id == pubmed_id).one_or_none()

    def count_proteins(self) -> int:
        """Count the number of proteins in the database."""
        return self.session.query(Protein).count()

    def count_biofunctions(self) -> int:
        """Count the number of biofunctions in the database."""
        return self.session.query(Biofunction).count()

    def count_metabolites(self) -> int:
        """Count the number of metabolites in the database."""
        return self._count_model(Metabolite)

    def count_pathways(self) -> int:
        """Count the number of pathways in the database."""
        return self._count_model(Pathway)

    def count_tissues(self) -> int:
        """Count the number of tissues in the database."""
        return self._count_model(Tissue)

    def summarize(self) -> Mapping[str, int]:
        """Summarize the contents of the database in a dictionary."""
        return dict(
            proteins=self.count_proteins(),
            diseases=self.count_diseases(),
            biofunctions=self.count_biofunctions(),
            references=self.count_references(),
            cellular_locations=self.count_cellular_locations(),
            metabolites=self.count_metabolites(),
            tissues=self.count_tissues(),
        )
| |
from __future__ import unicode_literals
import os
import re
import warnings
from contextlib import contextmanager
from datetime import timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.files import File
from django.utils import six, timezone
from djblets.testing.testcases import (FixturesCompilerMixin,
TestCase as DjbletsTestCase)
from oauthlib.common import generate_token
from oauth2_provider.models import AccessToken
from reviewboard import scmtools, initialize
from reviewboard.accounts.models import ReviewRequestVisit
from reviewboard.attachments.models import FileAttachment
from reviewboard.diffviewer.differ import DiffCompatVersion
from reviewboard.diffviewer.models import DiffSet, DiffSetHistory, FileDiff
from reviewboard.notifications.models import WebHookTarget
from reviewboard.oauth.models import Application
from reviewboard.reviews.models import (Comment,
FileAttachmentComment,
GeneralComment,
Group,
Review,
ReviewRequest,
ReviewRequestDraft,
Screenshot,
ScreenshotComment,
StatusUpdate)
from reviewboard.scmtools.models import Repository, Tool
from reviewboard.site.models import LocalSite
from reviewboard.webapi.models import WebAPIToken
class TestCase(FixturesCompilerMixin, DjbletsTestCase):
    """The base class for Review Board test cases.

    This class provides a number of convenient functions for creating
    common objects for testing, such as review requests and comments. They're
    populated with default data that can be overridden by the callers.

    This also overcomes an annoyance with default Django unit tests where
    the cache is not cleared across tests, leading to inconsistent results
    and useless testing.
    """

    # Name and ID of the LocalSite used by the with_local_site test helpers.
    local_site_name = 'local-site-1'
    local_site_id = 1

    # Collapses whitespace runs when normalizing docstrings (see
    # shortDescription()).
    ws_re = re.compile(r'\s+')

    # Default unified diff content used when creating FileDiffs.
    DEFAULT_FILEDIFF_DATA = (
        b'--- README\trevision 123\n'
        b'+++ README\trevision 123\n'
        b'@@ -1 +1 @@\n'
        b'-Hello, world!\n'
        b'+Hello, everybody!\n'
    )

    # Default git-style diff content used when creating FileDiffs.
    DEFAULT_GIT_FILEDIFF_DATA = (
        b'diff --git a/README b/README\n'
        b'index 94bdd3e..197009f 100644\n'
        b'--- README\n'
        b'+++ README\n'
        b'@@ -2 +2 @@\n'
        b'-blah blah\n'
        b'+blah!\n'
    )
    def setUp(self):
        """Set up the test case, initializing Review Board and clearing caches."""
        super(TestCase, self).setUp()

        initialize()

        # Per-test cache of LocalSite objects, filled by get_local_site().
        self._local_sites = {}

        # Clear the cache so that previous tests don't impact this one.
        cache.clear()
def shortDescription(self):
"""Returns the description of the current test.
This changes the default behavior to replace all newlines with spaces,
allowing a test description to span lines. It should still be kept
short, though.
"""
doc = self._testMethodDoc
if doc is not None:
doc = doc.split('\n\n', 1)[0]
doc = self.ws_re.sub(' ', doc).strip()
return doc
def get_local_site_or_none(self, name):
"""Returns a LocalSite matching the name, if provided, or None."""
if name:
return self.get_local_site(name=name)
else:
return None
def get_local_site(self, name):
if name not in self._local_sites:
self._local_sites[name] = LocalSite.objects.get(name=name)
return self._local_sites[name]
def create_webapi_token(self, user, note='Sample note',
policy={'access': 'rw'},
with_local_site=False,
**kwargs):
"""Creates a WebAPIToken for testing."""
if with_local_site:
local_site = self.get_local_site(name=self.local_site_name)
else:
local_site = None
return WebAPIToken.objects.generate_token(user=user,
note=note,
policy=policy,
local_site=local_site)
    @contextmanager
    def assert_warns(self, cls=DeprecationWarning, message=None):
        """A context manager for asserting code generates a warning.

        This method only supports code which generates a single warning.
        Tests which make use of code generating multiple warnings will
        need to manually catch their warnings.

        Args:
            cls (type, optional):
                The warning category the emitted warning must belong to.

            message (unicode, optional):
                If provided, the exact warning message expected.
        """
        with warnings.catch_warnings(record=True) as w:
            # Some warnings such as DeprecationWarning are filtered by
            # default, stop filtering them.
            warnings.simplefilter("always")

            # Sanity-check that nothing was recorded before the body runs.
            self.assertEqual(len(w), 0)

            yield

            # Exactly one warning of the expected category must have been
            # emitted by the body.
            self.assertEqual(len(w), 1)
            self.assertTrue(issubclass(w[-1].category, cls))

            if message is not None:
                self.assertEqual(message, six.text_type(w[-1].message))
def create_diff_file_attachment(self, filediff, from_modified=True,
review_request=None,
orig_filename='filename.png',
caption='My Caption',
mimetype='image/png',
**kwargs):
"""Creates a diff-based FileAttachment for testing.
The FileAttachment is tied to the given FileDiff. It's populated
with default data that can be overridden by the caller.
"""
file_attachment = FileAttachment.objects.create_from_filediff(
filediff=filediff,
from_modified=from_modified,
caption=caption,
orig_filename=orig_filename,
mimetype=mimetype,
**kwargs)
filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
'logo.png')
with open(filename, 'r') as f:
file_attachment.file.save(filename, File(f), save=True)
if review_request:
review_request.file_attachments.add(file_attachment)
return file_attachment
def create_diffset(self, review_request=None, revision=1, repository=None,
                   draft=False, name='diffset'):
    """Create a DiffSet for testing.

    The DiffSet defaults to revision 1, which the caller may override.
    It may optionally be attached to a review request — either directly
    to its diffset history, or to a draft of it when ``draft`` is set.
    """
    if review_request:
        # Diffs always live in the review request's own repository.
        repository = review_request.repository

    diffset = DiffSet.objects.create(
        name=name,
        revision=revision,
        repository=repository,
        diffcompat=DiffCompatVersion.DEFAULT)

    if review_request:
        if draft:
            new_draft = ReviewRequestDraft.create(review_request)
            new_draft.diffset = diffset
            new_draft.save()
        else:
            review_request.diffset_history.diffsets.add(diffset)

    return diffset
def create_diff_comment(self, review, filediff, interfilediff=None,
                        text='My comment', issue_opened=False,
                        issue_status=None, first_line=1, num_lines=5,
                        extra_fields=None, reply_to=None, **kwargs):
    """Create a diff Comment for testing.

    The comment is attached to ``review`` and ``filediff``, plus an
    optional ``interfilediff`` marking the end of an interdiff range.
    Every field has a sensible default the caller can override.

    Args:
        review (reviewboard.reviews.models.review.Review):
            The review the comment belongs to.

        filediff (reviewboard.diffviewer.models.FileDiff):
            The FileDiff the comment is on.

        interfilediff (reviewboard.diffviewer.models.FileDiff, optional):
            The end of an interdiff range, if any.

        text (unicode):
            The comment text.

        issue_opened (bool, optional):
            Whether the comment opens an issue.

        issue_status (unicode, optional):
            Issue state; defaults to open when an issue is opened.

        first_line (int, optional):
            First line (0-based) of the commented range.

        num_lines (int, optional):
            Number of lines in the range.

        extra_fields (dict, optional):
            Extra data stored on the comment.

        reply_to (reviewboard.reviews.models.diff_comment.Comment,
                  optional):
            The comment this one replies to.

        **kwargs (dict):
            Additional model attributes.

    Returns:
        reviewboard.reviews.models.diff_comment.Comment:
        The newly-created comment.
    """
    if issue_opened and not issue_status:
        # Opened issues default to the "open" state.
        issue_status = Comment.OPEN

    field_values = dict(kwargs,
                        filediff=filediff,
                        interfilediff=interfilediff,
                        first_line=first_line,
                        num_lines=num_lines,
                        text=text,
                        issue_opened=issue_opened,
                        issue_status=issue_status,
                        reply_to=reply_to)
    comment = Comment(**field_values)

    if extra_fields:
        comment.extra_data = extra_fields

    comment.save()
    review.comments.add(comment)

    return comment
def create_file_attachment(self, review_request,
                           orig_filename='filename.png',
                           caption='My Caption',
                           draft=False,
                           active=True,
                           **kwargs):
    """Create a FileAttachment attached to a review request, for testing.

    The attachment is added to either the review request itself or to a
    new draft of it (``draft``), and to either the active or the
    inactive attachment list (``active``).
    """
    file_attachment = self._create_base_file_attachment(
        caption=caption,
        orig_filename=orig_filename,
        **kwargs)

    # Choose the owner of the attachment list: the draft or the
    # published review request.
    if draft:
        owner = ReviewRequestDraft.create(review_request)
    else:
        owner = review_request

    if active:
        owner.file_attachments.add(file_attachment)
    else:
        owner.inactive_file_attachments.add(file_attachment)

    return file_attachment
def create_user_file_attachment(self, user,
                                caption='My Caption',
                                with_local_site=False,
                                local_site_name=None,
                                local_site=None,
                                has_file=False,
                                orig_filename='filename.png',
                                **kwargs):
    """Create a user-owned FileAttachment for testing.

    The :py:class:`reviewboard.attachments.models.FileAttachment` is
    tied to the given :py:class:`django.contrib.auth.models.User`.
    Unlike review-request attachments, it is created by default with
    neither an actual file nor a LocalSite.

    Args:
        user (django.contrib.auth.models.User):
            The user who owns the attachment.

        caption (unicode, optional):
            The attachment caption.

        with_local_site (bool, optional):
            Whether to associate the attachment with a LocalSite. When
            set, supply ``local_site_name`` or ``local_site`` too.

        local_site_name (unicode, optional):
            The name of the LocalSite to associate with.

        local_site (reviewboard.site.models.LocalSite, optional):
            The LocalSite to associate with.

        has_file (bool, optional):
            Whether to store an actual file on the model.

        orig_filename (unicode, optional):
            The original filename recorded on the model.

        kwargs (dict):
            Additional keyword arguments for the FileAttachment
            constructor.

    Returns:
        reviewboard.attachments.models.FileAttachment:
        The new attachment.
    """
    return self._create_base_file_attachment(
        user=user,
        caption=caption,
        orig_filename=orig_filename,
        has_file=has_file,
        with_local_site=with_local_site,
        local_site_name=local_site_name,
        local_site=local_site,
        **kwargs)
def create_file_attachment_comment(self, review, file_attachment,
                                   diff_against_file_attachment=None,
                                   text='My comment', issue_opened=False,
                                   issue_status=None, extra_fields=None,
                                   reply_to=None, **kwargs):
    """Create a FileAttachmentComment for testing.

    The comment is attached to ``review`` and ``file_attachment``,
    optionally diffed against ``diff_against_file_attachment``. Every
    field has a default the caller can override.

    Args:
        review (reviewboard.reviews.models.review.Review):
            The review the comment belongs to.

        file_attachment (reviewboard.attachments.models.FileAttachment):
            The attachment the comment is on.

        diff_against_file_attachment (reviewboard.attachments.models.
                                      FileAttachment, optional):
            The attachment being diffed against, for attachment diffs.

        text (unicode):
            The comment text.

        issue_opened (bool, optional):
            Whether the comment opens an issue.

        issue_status (unicode, optional):
            Issue state; defaults to open when an issue is opened.

        extra_fields (dict, optional):
            Extra data stored on the comment.

        reply_to (reviewboard.reviews.models.file_attachment_comment.
                  FileAttachmentComment, optional):
            The comment this one replies to.

        **kwargs (dict):
            Additional model attributes.

    Returns:
        reviewboard.reviews.models.file_attachment_comment.FileAttachmentComment:
        The newly-created comment.
    """
    if issue_opened and not issue_status:
        # Opened issues default to the "open" state.
        issue_status = FileAttachmentComment.OPEN

    field_values = dict(
        kwargs,
        file_attachment=file_attachment,
        diff_against_file_attachment=diff_against_file_attachment,
        text=text,
        issue_opened=issue_opened,
        issue_status=issue_status,
        reply_to=reply_to)
    comment = FileAttachmentComment(**field_values)

    if extra_fields:
        comment.extra_data = extra_fields

    comment.save()
    review.file_attachment_comments.add(comment)

    return comment
def create_filediff(self, diffset, source_file='/test-file',
                    dest_file='/test-file', source_revision='123',
                    dest_detail='124', status=FileDiff.MODIFIED,
                    diff=DEFAULT_FILEDIFF_DATA):
    """Create a FileDiff for testing.

    The FileDiff is attached to ``diffset`` and populated with default
    data that the caller can override.
    """
    field_values = {
        'diffset': diffset,
        'source_file': source_file,
        'dest_file': dest_file,
        'source_revision': source_revision,
        'dest_detail': dest_detail,
        'status': status,
        'diff': diff,
    }

    return FileDiff.objects.create(**field_values)
def create_repository(self, with_local_site=False, name='Test Repo',
                      tool_name='Git', path=None, local_site=None,
                      **kwargs):
    """Create a Repository for testing.

    The Repository may optionally be attached to a LocalSite, and is
    populated with default data the caller can override.

    ``tool_name`` may be "Git", "Mercurial", "Subversion", "CVS", or
    one of the test tools; the matching bundled repository path is used
    unless ``path`` is given explicitly.
    """
    if not local_site:
        if with_local_site:
            local_site = self.get_local_site(name=self.local_site_name)
        else:
            local_site = None

    if not path:
        testdata_dir = os.path.join(os.path.dirname(scmtools.__file__),
                                    'testdata')

        # Bundled test repositories, relative to testdata_dir.
        repo_names = {
            'Git': 'git_repo',
            'Test': 'git_repo',
            'TestToolSupportsPendingChangeSets': 'git_repo',
            'Mercurial': 'hg_repo.bundle',
            'CVS': 'cvs_repo',
        }

        if tool_name == 'Subversion':
            # Subversion expects a file:// URL rather than a local path.
            path = 'file://' + os.path.join(testdata_dir, 'svn_repo')
        elif tool_name in repo_names:
            path = os.path.join(testdata_dir, repo_names[tool_name])
        else:
            raise NotImplementedError

    return Repository.objects.create(
        name=name,
        local_site=local_site,
        tool=Tool.objects.get(name=tool_name),
        path=path,
        **kwargs)
def create_review_request(self, with_local_site=False, local_site=None,
                          summary='Test Summary',
                          description='Test Description',
                          testing_done='Testing',
                          submitter='doc',
                          branch='my-branch',
                          local_id=1001,
                          bugs_closed='', status='P', public=False,
                          publish=False, commit_id=None, changenum=None,
                          repository=None, id=None,
                          create_repository=False):
    """Create a ReviewRequest for testing.

    The ReviewRequest may optionally be attached to a LocalSite. It's also
    populated with default data that can be overridden by the caller.

    If create_repository is True, a Repository will be created
    automatically. If set, a custom repository cannot be provided.

    The provided submitter may either be a username or a User object.

    If publish is True, ReviewRequest.publish() will be called.
    """
    if not local_site:
        if with_local_site:
            local_site = self.get_local_site(name=self.local_site_name)
        else:
            local_site = None

    if not local_site:
        # local_id is only meaningful for review requests on a
        # LocalSite; clear the default otherwise.
        local_id = None

    if create_repository:
        # create_repository and an explicit repository are mutually
        # exclusive.
        assert not repository

        repository = \
            self.create_repository(with_local_site=with_local_site)

    if not isinstance(submitter, User):
        # Treat a non-User value as a username to look up.
        submitter = User.objects.get(username=submitter)

    review_request = ReviewRequest(
        summary=summary,
        description=description,
        branch=branch,
        testing_done=testing_done,
        local_site=local_site,
        local_id=local_id,
        submitter=submitter,
        diffset_history=DiffSetHistory.objects.create(),
        repository=repository,
        public=public,
        commit_id=commit_id,
        changenum=changenum,
        bugs_closed=bugs_closed,
        status=status)

    # Set this separately to avoid issues with CounterField updates.
    review_request.id = id

    review_request.save()

    if publish:
        # Publish on behalf of the submitter.
        review_request.publish(review_request.submitter)

    return review_request
def create_visit(self, review_request, visibility, user='doc',
                 username=None, timestamp=None):
    """Create a ReviewRequestVisit for testing.

    The ReviewRequestVisit is tied to the given ReviewRequest and User.
    It's populated with default data that can be overridden by the
    caller.

    The provided user may either be a username or a User object.

    Note: ``username`` and ``timestamp`` are currently accepted for
    signature compatibility but are not used.
    """
    # The old check used basestring, which does not exist on Python 3;
    # check against User instead, matching the other factory methods
    # (create_review, create_reply, etc.).
    if not isinstance(user, User):
        user = User.objects.get(username=user)

    return ReviewRequestVisit.objects.create(
        review_request=review_request,
        visibility=visibility,
        user=user)
def create_review(self, review_request, user='dopey',
                  body_top='Test Body Top', body_bottom='Test Body Bottom',
                  ship_it=False, publish=False, timestamp=None, **kwargs):
    """Creates a Review for testing.

    The Review is tied to the given ReviewRequest. It's populated with
    default data that can be overridden by the caller.

    The provided user may either be a username or a User object.

    If publish is True, Review.publish() will be called.

    Args:
        review_request (reviewboard.reviews.models.review_request.
                        ReviewRequest):
            The review request the review is filed against.

        user (unicode or django.contrib.auth.models.User, optional):
            The username or User object owning the review.

        body_top (unicode, optional):
            The text for the ``body_top`` field.

        body_bottom (unicode, optional):
            The text for the ``body_bottom`` field.

        ship_it (bool, optional):
            The Ship It state for the review.

        publish (bool, optional):
            Whether to publish the review immediately after creation.

        timestamp (datetime.datetime, optional):
            The timestamp for the review.

        **kwargs (dict):
            Additional attributes to set in the review.

    Returns:
        reviewboard.reviews.models.review.Review:
        The resulting review.
    """
    if not isinstance(user, User):
        # Treat a non-User value as a username to look up.
        user = User.objects.get(username=user)

    review = Review.objects.create(
        review_request=review_request,
        user=user,
        body_top=body_top,
        body_bottom=body_bottom,
        ship_it=ship_it,
        **kwargs)

    if publish:
        review.publish()

    if timestamp:
        # Apply the timestamp through a queryset update after creation
        # (presumably to bypass an auto-set timestamp field on save() —
        # TODO confirm), and keep the in-memory instance in sync.
        Review.objects.filter(pk=review.pk).update(timestamp=timestamp)
        review.timestamp = timestamp

    return review
def create_review_group(self, name='test-group', with_local_site=False,
                        local_site=None, visible=True, invite_only=False,
                        is_default_group=False):
    """Create a review Group for testing.

    The group may optionally be attached to a LocalSite, and is
    populated with default data that the caller can override.
    """
    if not local_site and with_local_site:
        local_site = self.get_local_site(name=self.local_site_name)

    field_values = {
        'name': name,
        'local_site': local_site,
        'visible': visible,
        'invite_only': invite_only,
        'is_default_group': is_default_group,
    }

    return Group.objects.create(**field_values)
def create_reply(self, review, user='grumpy', username=None,
                 body_top='Test Body Top', timestamp=None,
                 publish=False):
    """Create a reply to a review, for testing.

    The reply is tied to the given Review and populated with default
    data that the caller can override. ``user`` may be either a User
    object or a username.
    """
    if not isinstance(user, User):
        user = User.objects.get(username=user)

    reply = Review.objects.create(review_request=review.review_request,
                                  user=user,
                                  body_top=body_top,
                                  base_reply_to=review,
                                  timestamp=timestamp)

    if publish:
        reply.publish()

    return reply
def create_screenshot(self, review_request, caption='My caption',
                      draft=False, active=True):
    """Create a Screenshot for testing.

    The Screenshot is tied to the given ReviewRequest (directly, or via
    a draft when ``draft`` is set; to the active or inactive list per
    ``active``). It's populated with default data that can be
    overridden by the caller.
    """
    screenshot = Screenshot(caption=caption)
    filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
                            'logo.png')

    # The logo is a binary PNG; open it in binary mode. Text mode
    # ('r') fails to decode arbitrary bytes on Python 3.
    with open(filename, 'rb') as f:
        screenshot.image.save(filename, File(f), save=True)

    if draft:
        review_request_draft = ReviewRequestDraft.create(review_request)

        if active:
            screenshots = review_request_draft.screenshots
        else:
            screenshots = review_request_draft.inactive_screenshots
    else:
        if active:
            screenshots = review_request.screenshots
        else:
            screenshots = review_request.inactive_screenshots

    screenshots.add(screenshot)

    return screenshot
def create_screenshot_comment(self, review, screenshot, text='My comment',
                              x=1, y=1, w=5, h=5, issue_opened=False,
                              issue_status=None, extra_fields=None,
                              reply_to=None, **kwargs):
    """Create a ScreenshotComment for testing.

    The comment is tied to the given Review and Screenshot. It's
    populated with default data that can be overridden by the caller.

    Args:
        review (reviewboard.reviews.models.review.Review):
            The review associated with the comment.

        screenshot (reviewboard.reviews.models.screenshot.Screenshot):
            The screenshot associated with the comment.

        text (unicode):
            The text for the comment.

        x (int, optional):
            The X location for the comment on the screenshot.

        y (int, optional):
            The Y location for the comment on the screenshot.

        w (int, optional):
            The width for the comment on the screenshot.

        h (int, optional):
            The height for the comment on the screenshot.

        issue_opened (bool, optional):
            Whether an issue is to be opened for the comment.

        issue_status (unicode, optional):
            The issue status to set, if an issue is opened. Defaults to
            being an open issue.

        extra_fields (dict, optional):
            Extra data to set on the comment.

        reply_to (reviewboard.reviews.models.screenshot_comment.
                  ScreenshotComment, optional):
            The comment this comment replies to.

        **kwargs (dict):
            Additional model attributes to set on the comment.

    Returns:
        reviewboard.reviews.models.screenshot_comment.ScreenshotComment:
        The resulting comment.
    """
    if issue_opened and not issue_status:
        # Opened issues default to the "open" state.
        issue_status = ScreenshotComment.OPEN

    comment = ScreenshotComment(
        screenshot=screenshot,
        text=text,
        x=x,
        y=y,
        w=w,
        h=h,
        issue_opened=issue_opened,
        issue_status=issue_status,
        reply_to=reply_to,
        **kwargs)

    if extra_fields:
        comment.extra_data = extra_fields

    comment.save()
    review.screenshot_comments.add(comment)

    return comment
def _create_base_file_attachment(self,
                                 caption='My Caption',
                                 orig_filename='filename.png',
                                 has_file=True,
                                 user=None,
                                 with_local_site=False,
                                 local_site_name=None,
                                 local_site=None,
                                 **kwargs):
    """Create a FileAttachment object with the given parameters.

    When creating a
    :py:class:`reviewboard.attachments.models.FileAttachment` that will be
    associated to a review request, a user and local_site should not be
    specified.

    Args:
        caption (unicode, optional):
            The caption for the file attachment.

        orig_filename (unicode, optional):
            The original name of the file to set in the model.

        has_file (bool, optional):
            ``True`` if an actual file object should be included in the
            model.

        user (django.contrib.auth.models.User, optional):
            The user who owns the file attachment.

        with_local_site (bool, optional):
            ``True`` if the file attachment should be associated with a
            local site. If this is set, one of ``local_site_name`` or
            ``local_site`` should be provided as well.

        local_site_name (unicode, optional):
            The name of the local site to associate this attachment with.

        local_site (reviewboard.site.models.LocalSite, optional):
            The local site to associate this attachment with.

        kwargs (dict):
            Additional keyword arguments to pass into the FileAttachment
            constructor.

    Returns:
        reviewboard.attachments.models.FileAttachment:
        The new file attachment instance.
    """
    if with_local_site:
        local_site = self.get_local_site(name=local_site_name)

    file_attachment = FileAttachment(
        caption=caption,
        user=user,
        uuid='test-uuid',
        local_site=local_site,
        **kwargs)

    if has_file:
        filename = os.path.join(settings.STATIC_ROOT, 'rb', 'images',
                                'logo.png')

        file_attachment.orig_filename = orig_filename
        file_attachment.mimetype = 'image/png'

        # The logo is a binary PNG; open it in binary mode. Text mode
        # ('r') fails to decode arbitrary bytes on Python 3.
        with open(filename, 'rb') as f:
            file_attachment.file.save(filename, File(f), save=True)

    file_attachment.save()

    return file_attachment
def create_general_comment(self, review, text='My comment',
                           issue_opened=False, issue_status=None,
                           extra_fields=None, reply_to=None, **kwargs):
    """Create a GeneralComment for testing.

    The comment is attached to ``review`` and populated with default
    data the caller can override.

    Args:
        review (reviewboard.reviews.models.review.Review):
            The review the comment belongs to.

        text (unicode):
            The comment text.

        issue_opened (bool, optional):
            Whether the comment opens an issue.

        issue_status (unicode, optional):
            Issue state; defaults to open when an issue is opened.

        extra_fields (dict, optional):
            Extra data stored on the comment.

        reply_to (reviewboard.reviews.models.general_comment.
                  GeneralComment, optional):
            The comment this one replies to.

        **kwargs (dict):
            Additional model attributes.

    Returns:
        reviewboard.reviews.models.general_comment.GeneralComment:
        The newly-created comment.
    """
    if issue_opened and not issue_status:
        # Opened issues default to the "open" state.
        issue_status = GeneralComment.OPEN

    field_values = dict(kwargs,
                        text=text,
                        issue_opened=issue_opened,
                        issue_status=issue_status,
                        reply_to=reply_to)
    comment = GeneralComment(**field_values)

    if extra_fields:
        comment.extra_data = extra_fields

    comment.save()
    review.general_comments.add(comment)

    return comment
def create_status_update(self, review_request, user='dopey',
                         service_id='service', summary='Status Update',
                         state=StatusUpdate.PENDING,
                         review=None,
                         change_description=None):
    """Create a StatusUpdate for testing.

    The status update is attached to ``review_request`` and populated
    with defaults the caller can override.

    Args:
        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request the status update belongs to.

        user (django.contrib.auth.models.User or unicode):
            A User object, or the username of the owning user.

        service_id (unicode):
            The service ID for the new model.

        summary (unicode):
            The summary for the new model.

        state (unicode):
            The state for the new model; must be a valid choice for the
            state field.

        review (reviewboard.reviews.models.review.Review, optional):
            An associated review.

        change_description (reviewboard.changedescs.models.
                            ChangeDescription, optional):
            An associated change description.

    Returns:
        reviewboard.reviews.models.StatusUpdate:
        The new status update.
    """
    if not isinstance(user, User):
        user = User.objects.get(username=user)

    return StatusUpdate.objects.create(
        user=user,
        review_request=review_request,
        change_description=change_description,
        service_id=service_id,
        summary=summary,
        state=state,
        review=review)
def create_webhook(self, enabled=False, events=WebHookTarget.ALL_EVENTS,
                   url='http://example.com',
                   encoding=WebHookTarget.ENCODING_JSON,
                   use_custom_content=False, custom_content='',
                   secret='', apply_to=WebHookTarget.APPLY_TO_ALL,
                   repositories=None, with_local_site=False,
                   local_site=None, extra_fields=None):
    """Create a WebHookTarget for testing.

    The webhook is populated with defaults the caller can override.

    Args:
        enabled (bool):
            Whether the webhook starts out enabled.

        events (unicode):
            Comma-separated list of events the webhook triggers on.

        url (unicode):
            The URL requests will be made against.

        encoding (unicode):
            The payload encoding.

        use_custom_content (bool):
            Whether ``custom_content`` is sent instead of an
            auto-generated payload.

        custom_content (unicode):
            The custom payload, used when ``use_custom_content`` is set.

        secret (unicode):
            HMAC secret used to sign the payload.

        apply_to (unicode):
            Which repositories the webhook applies to.

        repositories (list):
            Repositories to limit the webhook to, when ``apply_to`` is
            ``WebHookTarget.APPLY_TO_SELECTED_REPOS``.

        with_local_site (bool):
            Whether to attach the webhook to a LocalSite.

        local_site (reviewboard.site.models.LocalSite):
            An explicit LocalSite; looked up when ``with_local_site``
            is set and this is ``None``.

        extra_fields (dict):
            Extra data imported into the webhook.

    Returns:
        WebHookTarget: A webhook constructed with the given arguments.
    """
    if not local_site:
        if with_local_site:
            local_site = self.get_local_site(name=self.local_site_name)
        else:
            local_site = None

    webhook = WebHookTarget.objects.create(
        enabled=enabled,
        events=events.split(','),
        url=url,
        encoding=encoding,
        use_custom_content=use_custom_content,
        custom_content=custom_content,
        secret=secret,
        apply_to=apply_to,
        local_site=local_site)

    if repositories:
        webhook.repositories = repositories

    if extra_fields:
        webhook.extra_data = extra_fields
        webhook.save(update_fields=['extra_data'])

    return webhook
def create_oauth_application(
    self, user, local_site=None, with_local_site=False,
    redirect_uris='http://example.com',
    authorization_grant_type=Application.GRANT_CLIENT_CREDENTIALS,
    client_type=Application.CLIENT_PUBLIC,
    **kwargs):
    """Create an OAuth Application for testing.

    Args:
        user (django.contrib.auth.models.User):
            The user who will own the application.

        local_site (reviewboard.site.models.LocalSite, optional):
            An explicit LocalSite to attach the application to; looked
            up when ``with_local_site`` is set and this is ``None``.

        redirect_uris (unicode, optional):
            Whitespace-separated list of allowed redirect URIs.

        authorization_grant_type (unicode, optional):
            The application's grant type.

        client_type (unicode, optional):
            The application's client type.

        **kwargs (dict):
            Additional keyword arguments for the
            :py:class:`~reviewboard.oauth.models.Application`
            initializer.

    Returns:
        reviewboard.oauth.models.Application:
        The created application.
    """
    if not local_site:
        if with_local_site:
            local_site = self.get_local_site(self.local_site_name)
        else:
            local_site = None

    field_values = dict(
        kwargs,
        user=user,
        local_site=local_site,
        redirect_uris=redirect_uris,
        authorization_grant_type=authorization_grant_type,
        client_type=client_type,
        extra_data='{}')

    return Application.objects.create(**field_values)
def create_oauth_token(self, application, user, scope='', expires=None,
                       **kwargs):
    """Create an OAuth2 access token for testing.

    Args:
        application (reviewboard.oauth.models.Application):
            The application the token should be associated with.

        user (django.contrib.auth.models.User):
            The user who should own the token.

        scope (unicode, optional):
            The scopes of the token. This argument defaults to the
            empty scope.

        expires (datetime.timedelta, optional):
            How far into the future the token expires. If not provided,
            this argument defaults to one hour.

        **kwargs (dict):
            Additional keyword arguments to pass to the AccessToken
            constructor.

    Returns:
        oauth2_provider.models.AccessToken:
        The created access token.
    """
    if expires is None:
        expires = timedelta(hours=1)

    # Forward **kwargs; previously they were accepted but silently
    # dropped.
    return AccessToken.objects.create(
        application=application,
        token=generate_token(),
        expires=timezone.now() + expires,
        scope=scope,
        user=user,
        **kwargs)
| |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Usage:
ping [-c <count>] [-i <interval>] [-W <timeout>] <destination>
Options:
-c <count>, --count=<count> [default: 5]
-i <interval>, --interval=<interval> [default: 1.0]
Wait interval seconds between sending each packet. The default is to wait for one second between each packet normally.
-W <timeout>, --timeout=<timeout> [default: 2.]
Time to wait for a response, in seconds. The option affects only the timeout in absence of any responses; otherwise ping waits for two RTTs.
A pure python ping implementation using raw socket.
Note that ICMP messages can only be sent from processes running as root.
Derived from ping.c distributed in Linux's netkit. That code is
copyright (c) 1989 by The Regents of the University of California.
That code is in turn derived from code written by Mike Muuss of the
US Army Ballistic Research Laboratory in December, 1983 and
placed in the public domain. They have my thanks.
Bugs are naturally mine. I'd be glad to hear about them. There are
certainly word-size dependencies here.
Copyright (c) Matthew Dixon Cowles, <http://www.visi.com/~mdc/>.
Distributable under the terms of the GNU General Public License
version 2. Provided with no warranties of any sort.
Original Version from Matthew Dixon Cowles:
-> ftp://ftp.visi.com/users/mdc/ping.py
Rewrite by Jens Diemer:
-> http://www.python-forum.de/post-69122.html#69122
Revision history
~~~~~~~~~~~~~~~~
August 18, 2016
changes by J. Bain
- implemented interface for pythonista stash
March 11, 2010
changes by Samuel Stauffer:
- replaced time.clock with default_timer which is set to
time.clock on windows and time.time on other systems.
May 30, 2007
little rewrite by Jens Diemer:
- change socket asterisk import to a normal import
- replace time.time() with time.clock()
- delete "return None" (or change to "return" only)
- in checksum() rename "str" to "source_string"
November 22, 1997
Initial hack. Doesn't do much, but rather than try to guess
what features I (or others) will want in the future, I've only
put in what I need now.
December 16, 1997
For some reason, the checksum bytes are in the wrong order when
this is run under Solaris 2.X for SPARC but it works right under
Linux x86. Since I don't know just what's wrong, I'll swap the
bytes always and then do an htons().
December 4, 2000
Changed the struct.pack() calls to pack the checksum and ID as
unsigned. My thanks to Jerome Poincheval for the fix.
January 27, 2015
Changed receive response to not accept ICMP request messages.
It was possible to receive the very request that was sent.
Last commit info:
~~~~~~~~~~~~~~~~~
$LastChangedDate: $
$Rev: $
$Author: $
"""
from __future__ import print_function
import os
import select
import socket
import struct
import sys
import time
import argparse
from six.moves import xrange
# On Windows, the best timer is time.clock()
# On most other platforms the best timer is time.time()
default_timer = time.clock if sys.platform == "win32" else time.time
# From /usr/include/linux/icmp.h; your mileage may vary.
ICMP_ECHO_REQUEST = 8 # Seems to be the same on Solaris.
def checksum(source_string):
    """
    Compute the Internet checksum of source_string, as in in_cksum()
    from ping.c: sum 16-bit words, fold the carries, complement, then
    byte-swap so the result can be packed and passed through htons().

    Accepts both bytes (Python 3) and str (Python 2).
    """
    total = 0

    # Floor division: on Python 3, "/" would make countTo a float and
    # break the comparisons below.
    countTo = (len(source_string) // 2) * 2
    count = 0

    while count < countTo:
        # Indexing bytes yields ints on Python 3 and 1-char strings on
        # Python 2; normalize both to ints.
        v1 = source_string[count + 1]
        if not isinstance(v1, int):
            v1 = ord(v1)

        v2 = source_string[count]
        if not isinstance(v2, int):
            v2 = ord(v2)

        total = total + (v1 * 256 + v2)
        total = total & 0xffffffff  # Necessary?
        count = count + 2

    if countTo < len(source_string):
        # Odd trailing byte. Apply the same int/str normalization as the
        # loop; the old unconditional ord() crashed on Python 3 bytes.
        last = source_string[len(source_string) - 1]
        if not isinstance(last, int):
            last = ord(last)

        total = total + last
        total = total & 0xffffffff  # Necessary?

    # Fold 32 bits into 16 and take the one's complement.
    total = (total >> 16) + (total & 0xffff)
    total = total + (total >> 16)
    answer = ~total
    answer = answer & 0xffff

    # Swap bytes. Bugger me if I know why.
    answer = answer >> 8 | (answer << 8 & 0xff00)

    return answer
def receive_one_ping(my_socket, ID, timeout):
    """
    Wait for an echo reply on my_socket and return the round-trip delay
    in seconds, or None if timeout expires first.
    """
    remaining = timeout

    while True:
        select_started = default_timer()
        ready = select.select([my_socket], [], [], remaining)
        time_in_select = default_timer() - select_started

        if ready[0] == []:  # Timeout
            return

        received_at = default_timer()
        packet, addr = my_socket.recvfrom(1024)

        # The ICMP header follows the 20-byte IP header.
        icmp_type, icmp_code, icmp_cksum, packet_id, sequence = \
            struct.unpack(b"bbHHh", packet[20:28])

        # Filter out our own echo request (type 8), which we may get
        # back when pinging 127.0.0.1; accept only replies with our ID.
        if icmp_type != 8 and packet_id == ID:
            double_size = struct.calcsize(b"d")
            sent_at = struct.unpack(b"d",
                                    packet[28:28 + double_size])[0]
            return received_at - sent_at

        remaining = remaining - time_in_select
        if remaining <= 0:
            return
def send_one_ping(my_socket, dest_addr, ID):
    """
    Build one ICMP echo request and send it to >dest_addr<.
    """
    dest_addr = socket.gethostbyname(dest_addr)

    # ICMP header layout: type (8), code (8), checksum (16), id (16),
    # sequence (16). Pack it first with a zero checksum so the real
    # checksum can be computed over header + payload.
    dummy_header = struct.pack(b"bbHHh", ICMP_ECHO_REQUEST, 0, 0, ID, 1)

    # Payload: the send timestamp followed by filler, 192 bytes total.
    bytesInDouble = struct.calcsize("d")
    payload = struct.pack("d", default_timer()) + \
        (192 - bytesInDouble) * b"Q"

    my_checksum = checksum(dummy_header + payload)

    # Repack the header with the real checksum in network byte order.
    header = struct.pack(b"bbHHh", ICMP_ECHO_REQUEST, 0,
                         socket.htons(my_checksum), ID, 1)

    # The port (1) is a placeholder; ICMP itself has no port concept.
    my_socket.sendto(header + payload, (dest_addr, 1))
def do_one(dest_addr, timeout):
    """
    Send a single ping to dest_addr and return either the delay
    (in seconds) or None on timeout.
    """
    icmp = socket.getprotobyname("icmp")
    my_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, icmp)

    try:
        # The low 16 bits of the PID let us match replies to requests.
        my_ID = os.getpid() & 0xFFFF

        send_one_ping(my_socket, dest_addr, my_ID)
        delay = receive_one_ping(my_socket, my_ID, timeout)
    finally:
        # Always release the socket — previously it leaked whenever
        # send_one_ping raised (e.g. socket.gaierror during name
        # resolution, which verbose_ping catches).
        my_socket.close()

    return delay
def verbose_ping(dest_addr, timeout=2, count=4, interval=1.0):
    """
    Send >count< pings to >dest_addr< with the given >timeout< and
    display each result.

    Returns True if at least one ping received a reply.
    """
    ping_succeeded = False
    for i in xrange(count):
        print("ping %s..." % dest_addr, end=' ')
        try:
            delay = do_one(dest_addr, timeout)
        except socket.gaierror as e:
            # e[1] was Python 2-only; gaierror.args is (errno, message).
            if len(e.args) > 1:
                reason = e.args[1]
            else:
                reason = e

            print("failed. (socket error: '%s')" % reason)
            break

        if delay is None:
            print("failed. (timeout within %ssec.)" % timeout)
        else:
            # Space pings `interval` seconds apart, crediting the time
            # already spent waiting for this reply. The original used
            # min(0, ...), which never slept and could pass a negative
            # value to time.sleep() (a ValueError on Python 3).
            time.sleep(max(0, interval - delay))
            print("got ping in %0.4fms\n" % (delay * 1000))
            ping_succeeded = True

    return ping_succeeded
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="send ICMP ECHO_REQUEST to network hosts")
    parser.add_argument("destination", help="host to ping")
    parser.add_argument("-W", "--timeout", help="specify a timeout",
                        type=float, default=2)
    # Fixed help-text typo: "this much ECHO_REQUEST packkets".
    parser.add_argument("-c", "--count",
                        help="stop after sending this many ECHO_REQUEST "
                             "packets",
                        type=int, default=5)
    parser.add_argument("-i", "--interval",
                        help="Wait the specified time between each ping",
                        type=float, default=1.0)
    ns = parser.parse_args()

    s = verbose_ping(ns.destination, ns.timeout, ns.count, ns.interval)

    # Mirror ping(8): exit 0 if any reply arrived, 1 otherwise.
    if s:
        sys.exit(0)
    else:
        sys.exit(1)
| |
#!/usr/bin/env python
# Copyright (c) 2011, 2012 Walter Bender
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gst
Gst.init(None)
from fcntl import ioctl
import os
from gettext import gettext as _
from plugins.camera_sensor.tacamera import Camera
from plugins.camera_sensor.v4l2 import v4l2_control, V4L2_CID_AUTOGAIN, \
VIDIOC_G_CTRL, VIDIOC_S_CTRL
from plugins.plugin import Plugin
from TurtleArt.tapalette import make_palette
from TurtleArt.talogo import media_blocks_dictionary
from TurtleArt.tautils import debug_output, power_manager_off
from TurtleArt.taconstants import MEDIA_SHAPES, NO_IMPORT, SKIN_PATHS, \
BLOCKS_WITH_SKIN
from TurtleArt.taprimitive import (ConstantArg, Primitive)
from TurtleArt.tatype import TYPE_NUMBER
class Camera_sensor(Plugin):
    ''' TurtleArt plugin exposing V4L2 camera devices (/dev/video0 and
    /dev/video1) as palette blocks: a luminance/RGB sensor readout and
    camera-snapshot media blocks. '''
    def __init__(self, parent):
        Plugin.__init__(self)
        ''' Make sure there is a camera device '''
        self._parent = parent
        # _status: True when at least one /dev/video* node exists.
        self._status = False
        # Cached v4l2 AUTOGAIN control struct (created lazily).
        self._ag_control = None
        self.devices = []   # device node paths, e.g. '/dev/video0'
        self.cameras = []   # Camera objects, created lazily in start()
        self.luminance = 0  # last luminance reading
        if os.path.exists('/dev/video0'):
            self.devices.append('/dev/video0')
        if os.path.exists('/dev/video1'):
            self.devices.append('/dev/video1')
        if len(self.devices) > 0:
            self._status = True
        else:
            self._status = False
    def setup(self):
        ''' Set up the palettes '''
        sensors_palette = make_palette('sensor',
                                       colors=["#FF6060", "#A06060"],
                                       help_string=_(
                                           'Palette of sensor blocks'),
                                       position=6)
        media_palette = make_palette('media',
                                     colors=["#A0FF00", "#80A000"],
                                     help_string=_('Palette of media objects'),
                                     position=7)
        # set up camera-specific blocks
        media_blocks_dictionary['camera'] = self.prim_take_picture0
        media_blocks_dictionary['camera1'] = self.prim_take_picture1
        SKIN_PATHS.append('plugins/camera_sensor/images')
        # Blocks are hidden when no camera device was found; the second
        # camera block only appears when two devices are present.
        hidden = True
        second_cam = False
        if self._status:
            hidden = False
            if len(self.devices) > 1:
                second_cam = True
        sensors_palette.add_block('luminance',
                                  hidden=hidden,
                                  style='box-style',
                                  label=_('brightness'),
                                  help_string=_(
                                      'light level detected by camera'),
                                  value_block=True,
                                  prim_name='luminance')
        self._parent.lc.def_prim(
            'luminance', 0,
            Primitive(self.prim_read_camera,
                      return_type=TYPE_NUMBER,
                      kwarg_descs={'luminance_only': ConstantArg(True)},
                      call_afterwards=self.after_luminance))
        media_palette.add_block('camera',
                                hidden=hidden,
                                style='box-style-media',
                                label=' ',
                                default='CAMERA',
                                help_string=_('camera output'),
                                content_block=True)
        media_palette.add_block('camera1',
                                hidden=not (second_cam),
                                style='box-style-media',
                                label=' ',
                                default='CAMERA',
                                help_string=_('camera output'),
                                content_block=True)
        # Deprecated block, kept (hidden) for backward compatibility
        sensors_palette.add_block(
            'read_camera',
            hidden=True,
            style='box-style',
            label=_('brightness'),
            help_string=_(
                'Average RGB color from camera is pushed to the stack'),
            value_block=True,
            prim_name='read_camera')
        self._parent.lc.def_prim(
            'read_camera', 0,
            Primitive(self.prim_read_camera,
                      return_type=TYPE_NUMBER,
                      kwarg_descs={'luminance_only': ConstantArg(False)}))
        # Register skins/shapes so the camera blocks render correctly
        # and are excluded from project import.
        NO_IMPORT.append('camera')
        BLOCKS_WITH_SKIN.append('camera')
        NO_IMPORT.append('camera1')
        BLOCKS_WITH_SKIN.append('camera1')
        MEDIA_SHAPES.append('camerasmall')
        MEDIA_SHAPES.append('cameraoff')
        MEDIA_SHAPES.append('camera1small')
        MEDIA_SHAPES.append('camera1off')
    def start(self):
        ''' Initialize the camera if there is an camera block in use '''
        camera_blocks = len(self._parent.block_list.get_similar_blocks(
            'block', ['camera', 'camera1', 'read_camera', 'luminance']))
        if not self._parent.running_turtleart or camera_blocks > 0:
            if self._status and len(self.cameras) == 0:
                for device in self.devices:
                    self.cameras.append(Camera(device))
            # Keep the machine awake while the camera may be in use.
            power_manager_off(True)
    def quit(self):
        ''' This gets called when the activity quits '''
        self._reset_the_camera()
    def stop(self):
        ''' This gets called by the stop button '''
        self._reset_the_camera()
    def clear(self):
        ''' This gets called by the clean button and erase button '''
        self._reset_the_camera()
    def _reset_the_camera(self):
        # Stop every open camera, restore AUTOGAIN, and re-enable power
        # management.
        if self._status and len(self.cameras) > 0:
            for i, camera in enumerate(self.cameras):
                camera.stop_camera_input()
                self._set_autogain(1, camera=i)  # enable AUTOGAIN
        power_manager_off(False)
    def _status_report(self):
        # Debug helper: log and return whether a camera was detected.
        debug_output('Reporting camera status: %s' % (str(self._status)),
                     self._parent.running_sugar)
        return self._status
    # Block primitives used in talogo
    def prim_take_picture0(self):
        # 'camera' block: snapshot from the first camera.
        self._take_picture(camera=0)
    def prim_take_picture1(self):
        # 'camera1' block: snapshot from the second camera.
        self._take_picture(camera=1)
    def _take_picture(self, camera=0):
        ''' method called by media block '''
        self._set_autogain(1, camera)  # enable AUTOGAIN
        self._get_pixbuf_from_camera(camera)
        self._parent.lc.pixbuf = self.cameras[camera].pixbuf
    def prim_read_camera(self, luminance_only=False, camera=0):
        """ Read average pixel from camera and push b, g, r to the stack """
        self.luminance_only = luminance_only
        if not self._status:
            # No camera: report -1, either as the return value or as
            # three -1 entries pushed onto the heap.
            if self.luminance_only:
                return -1
            else:
                self._parent.lc.heap.append(-1)
                self._parent.lc.heap.append(-1)
                self._parent.lc.heap.append(-1)
                return
        # Disable AUTOGAIN so readings are comparable between calls.
        self._set_autogain(0, camera=camera)  # disable AUTOGAIN
        self._get_pixbuf_from_camera(camera=camera)
        self.calc_luminance(camera=camera)
        if self.luminance_only:
            return int(self.luminance)
        else:
            self._parent.lc.heap.append(self.b)
            self._parent.lc.heap.append(self.g)
            self._parent.lc.heap.append(self.r)
            return
    def calc_luminance(self, camera=0):
        # Average a 10x10 pixel patch at the center of the frame and
        # store either the weighted luminance or the r/g/b means.
        array = self.cameras[camera].pixbuf.get_pixels()
        width = self.cameras[camera].pixbuf.get_width()
        height = self.cameras[camera].pixbuf.get_height()
        if array is not None:
            # NOTE(review): assumes 3 bytes per pixel (RGB, no alpha) and
            # that get_pixels() returns an indexable byte string whose
            # items need ord() -- confirm against the pixbuf backend.
            length = int(len(array) / 3)
            if length != width * height:
                debug_output('array length != width x height (%d != %dx%d)' %
                             (length, width, height),
                             self._parent.running_sugar)
            # Average the 100 pixels in the center of the screen
            r, g, b = 0, 0, 0
            row_offset = int((height / 2 - 5) * width * 3)
            column_offset = int(width / 2 - 5) * 3
            for y in range(10):
                i = row_offset + column_offset
                for x in range(10):
                    r += ord(array[i])
                    i += 1
                    g += ord(array[i])
                    i += 1
                    b += ord(array[i])
                    i += 1
                row_offset += width * 3
            if self.luminance_only:
                # Standard-ish luma weights (0.3 R + 0.6 G + 0.1 B).
                self.luminance = int((r * 0.3 + g * 0.6 + b * 0.1) / 100)
            else:
                self.r = int(r / 100)
                self.g = int(g / 100)
                self.b = int(b / 100)
        else:
            # No pixel data available; report the error value.
            if self.luminance_only:
                self.luminance = -1
            else:
                self.r = -1
                self.g = -1
                self.b = -1
    def after_luminance(self, luminance_only=False):
        # Refresh the on-screen value label after a luminance read.
        if self._parent.lc.update_values and luminance_only:
            self._parent.lc.update_label_value('luminance', self.luminance)
    def _set_autogain(self, state, camera=0):
        ''' 0 is off; 1 is on '''
        # Skip the ioctl round-trip when the control already holds the
        # requested state.
        if self._ag_control is not None and self._ag_control.value == state:
            return
        try:
            # NOTE(review): 'rw' is not a standard open() mode string --
            # on Python 3 this raises ValueError; presumably intended to
            # be 'r+' or os.open with O_RDWR.  TODO confirm.
            video_capture_device = open(self.devices[camera], 'rw')
        except BaseException:
            video_capture_device = None
            debug_output('video capture device not available',
                         self._parent.running_sugar)
            return
        self._ag_control = v4l2_control(V4L2_CID_AUTOGAIN)
        try:
            # Read the current control, then write back the new state.
            ioctl(video_capture_device, VIDIOC_G_CTRL, self._ag_control)
            self._ag_control.value = state
            ioctl(video_capture_device, VIDIOC_S_CTRL, self._ag_control)
        except BaseException:
            # Best-effort: some drivers do not expose AUTOGAIN.
            pass
        video_capture_device.close()
    def _get_pixbuf_from_camera(self, camera):
        ''' Regardless of how we get it, we want to return a pixbuf '''
        self._parent.lc.pixbuf = None
        if self._status:
            self.cameras[camera].start_camera_input()
| |
import asyncio
import unittest
import unittest.mock
from aiohttp import streams
class TestFlowControlStreamReader(unittest.TestCase):
    """Tests for streams.FlowControlStreamReader: reading while paused
    must resume the mocked transport, and feeding data past the limit
    must pause it."""
    def setUp(self):
        self.stream = unittest.mock.Mock()
        self.transp = self.stream.transport
        # Private event loop per test; clear the global loop so nothing
        # accidentally relies on it.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
    def tearDown(self):
        self.loop.close()
    def _make_one(self, *args, **kwargs):
        # limit=1 so a single extra byte triggers flow control.
        return streams.FlowControlStreamReader(
            self.stream, limit=1, loop=self.loop, *args, **kwargs)
    def test_read(self):
        # read() on a paused reader resumes the transport.
        r = self._make_one()
        r.paused = True
        r.feed_data(b'da', 2)
        res = self.loop.run_until_complete(r.read(1))
        self.assertEqual(res, b'd')
        self.assertTrue(self.transp.resume_reading.called)
    def test_readline(self):
        # readline() on a paused reader resumes the transport.
        r = self._make_one()
        r.paused = True
        r.feed_data(b'data\n', 5)
        res = self.loop.run_until_complete(r.readline())
        self.assertEqual(res, b'data\n')
        self.assertTrue(self.transp.resume_reading.called)
    def test_readany(self):
        # readany() on a paused reader resumes the transport.
        r = self._make_one()
        r.paused = True
        r.feed_data(b'data', 4)
        res = self.loop.run_until_complete(r.readany())
        self.assertEqual(res, b'data')
        self.assertTrue(self.transp.resume_reading.called)
    def test_readexactly(self):
        # readexactly() on a paused reader resumes the transport.
        r = self._make_one()
        r.paused = True
        r.feed_data(b'datadata', 8)
        res = self.loop.run_until_complete(r.readexactly(2))
        self.assertEqual(res, b'da')
        self.assertTrue(self.transp.resume_reading.called)
    def test_feed_data(self):
        # Feeding more than the limit pauses the transport.
        r = self._make_one()
        r._stream.paused = False
        r.feed_data(b'datadata', 8)
        self.assertTrue(self.transp.pause_reading.called)
class FlowControlMixin:
    """Shared flow-control assertions for the FlowControl*Queue tests.

    Subclasses must provide ``self.stream`` (a mock with a ``transport``
    attribute), ``self.loop``, and ``_make_one()`` returning the queue
    under test with ``limit=1``."""
    def test_resume_on_init(self):
        # Constructing over a paused stream resumes it immediately.
        stream = unittest.mock.Mock()
        stream.paused = True
        streams.FlowControlDataQueue(stream, limit=1, loop=self.loop)
        self.assertTrue(stream.transport.resume_reading.called)
        self.assertFalse(stream.paused)
    def test_no_transport_in_init(self):
        # Without a transport nothing can be resumed; paused stays set.
        stream = unittest.mock.Mock()
        stream.paused = True
        stream.transport = None
        streams.FlowControlDataQueue(stream, limit=1, loop=self.loop)
        self.assertTrue(stream.paused)
    def test_feed_no_waiter(self):
        # Feeding past the limit with no reader waiting pauses reading.
        out = self._make_one()
        out.feed_data(object(), 100)
        self.assertTrue(self.stream.transport.pause_reading.called)
    def test_feed_no_transport(self):
        # No transport: feeding cannot pause anything.
        self.stream.transport = None
        out = self._make_one()
        self.stream.paused = False
        out.feed_data(object(), 100)
        self.assertFalse(self.stream.paused)
    def test_feed_with_waiter(self):
        # Data fed straight to a waiting reader never pauses the stream.
        self.stream.paused = False
        out = self._make_one()
        read_task = asyncio.Task(out.read(), loop=self.loop)
        def cb():
            out.feed_data(object(), 100)
        self.loop.call_soon(cb)
        self.loop.run_until_complete(read_task)
        self.assertFalse(self.stream.transport.pause_reading.called)
        self.assertFalse(self.stream.paused)
    def test_resume_on_read(self):
        # Draining below the limit resumes a paused stream.
        out = self._make_one()
        out.feed_data(object(), 100)
        self.assertTrue(self.stream.paused)
        self.loop.run_until_complete(out.read())
        self.assertTrue(self.stream.transport.resume_reading.called)
        self.assertFalse(self.stream.paused)
    def test_resume_on_read_no_transport(self):
        # Without a transport, read() cannot resume; paused stays set.
        item = object()
        out = self._make_one()
        out.feed_data(item, 100)
        self.assertTrue(self.stream.paused)
        self.stream.transport = None
        res = self.loop.run_until_complete(out.read())
        self.assertIs(res, item)
        self.assertTrue(self.stream.paused)
    def test_no_resume_on_read(self):
        # Still over the limit after one read: no resume.
        out = self._make_one()
        out.feed_data(object(), 100)
        out.feed_data(object(), 100)
        out.feed_data(object(), 100)
        self.assertTrue(self.stream.paused)
        self.stream.transport.reset_mock()
        self.loop.run_until_complete(out.read())
        self.assertFalse(self.stream.transport.resume_reading.called)
        self.assertTrue(self.stream.paused)
    def test_pause_on_read(self):
        # Buffer pre-filled past the limit: reading pauses the stream.
        out = self._make_one()
        out._buffer.append((object(), 100))
        out._buffer.append((object(), 100))
        out._buffer.append((object(), 100))
        out._size = 300
        self.stream.paused = False
        self.loop.run_until_complete(out.read())
        self.assertTrue(self.stream.transport.pause_reading.called)
        self.assertTrue(self.stream.paused)
    def test_no_pause_on_read(self):
        # Buffer drops below the limit after reading: no pause.
        item = object()
        out = self._make_one()
        out._buffer.append((item, 100))
        out._size = 100
        self.stream.paused = False
        res = self.loop.run_until_complete(out.read())
        self.assertIs(res, item)
        self.assertFalse(self.stream.transport.pause_reading.called)
        self.assertFalse(self.stream.paused)
    def test_no_pause_on_read_no_transport(self):
        # Over the limit but no transport: nothing to pause.
        item = object()
        out = self._make_one()
        out._buffer.append((item, 100))
        out._buffer.append((object(), 100))
        out._buffer.append((object(), 100))
        out._size = 300
        self.stream.paused = False
        self.stream.transport = None
        res = self.loop.run_until_complete(out.read())
        self.assertIs(res, item)
        self.assertFalse(self.stream.paused)
class TestFlowControlDataQueue(unittest.TestCase, FlowControlMixin):
    """Runs FlowControlMixin's assertions against FlowControlDataQueue."""
    def setUp(self):
        self.stream = unittest.mock.Mock()
        # Private event loop per test; clear the global loop.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
    def tearDown(self):
        self.loop.close()
    def _make_one(self, *args, **kwargs):
        # limit=1 so any fed item trips flow control.
        return streams.FlowControlDataQueue(
            self.stream, limit=1, loop=self.loop, *args, **kwargs)
class TestFlowControlChunksQueue(unittest.TestCase, FlowControlMixin):
    """Runs FlowControlMixin's assertions against FlowControlChunksQueue,
    plus EOF-specific behavior of the chunk queue."""
    def setUp(self):
        self.stream = unittest.mock.Mock()
        # Private event loop per test; clear the global loop.
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(None)
    def tearDown(self):
        self.loop.close()
    def _make_one(self, *args, **kwargs):
        # limit=1 so any fed item trips flow control.
        return streams.FlowControlChunksQueue(
            self.stream, limit=1, loop=self.loop, *args, **kwargs)
    def test_read_eof(self):
        # feed_eof() wakes a pending read() and marks the queue at EOF.
        out = self._make_one()
        read_task = asyncio.Task(out.read(), loop=self.loop)
        def cb():
            out.feed_eof()
        self.loop.call_soon(cb)
        self.loop.run_until_complete(read_task)
        self.assertTrue(out.at_eof())
    def test_read_until_eof(self):
        # After the buffer drains, read() returns b'' at EOF.
        item = object()
        out = self._make_one()
        out.feed_data(item, 1)
        out.feed_eof()
        data = self.loop.run_until_complete(out.read())
        self.assertIs(data, item)
        thing = self.loop.run_until_complete(out.read())
        self.assertEqual(thing, b'')
        self.assertTrue(out.at_eof())
    def test_readany(self):
        # readany is an alias of read on the chunk queue.
        out = self._make_one()
        self.assertIs(out.read.__func__, out.readany.__func__)
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Test the first level model.
"""
from __future__ import with_statement
import os
import shutil
import warnings
import numpy as np
import pandas as pd
from nibabel import (load,
Nifti1Image,
)
from nose.tools import (assert_equal,
assert_raises,
assert_true,
)
from numpy.testing import (assert_almost_equal,
assert_array_equal,
)
from nibabel.tmpdirs import InTemporaryDirectory
from nistats.design_matrix import (check_design_matrix,
make_first_level_design_matrix,
)
from nistats.first_level_model import (first_level_models_from_bids,
FirstLevelModel,
mean_scaling,
run_glm,
)
from nistats.utils import get_bids_files
from nistats._utils.testing import (_create_fake_bids_dataset,
_generate_fake_fmri_data,
_write_fake_fmri_data,
)
BASEDIR = os.path.dirname(os.path.abspath(__file__))
FUNCFILE = os.path.join(BASEDIR, 'functional.nii.gz')
def test_high_level_glm_one_session():
    """Fit a single-session FirstLevelModel, with and without a mask,
    and check that a mask is inferred and a z-map image is produced."""
    # New API
    shapes, rk = [(7, 8, 9, 15)], 3
    mask, fmri_data, design_matrices = _generate_fake_fmri_data(shapes, rk)
    # No mask given: the masker must compute one itself.
    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    assert_true(isinstance(single_session_model.masker_.mask_img_,
                           Nifti1Image))
    # Explicit mask: contrast computation yields a Nifti image.
    single_session_model = FirstLevelModel(mask_img=mask).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = single_session_model.compute_contrast(np.eye(rk)[:1])
    assert_true(isinstance(z1, Nifti1Image))
def test_high_level_glm_with_data():
    """Fit a multi-session FirstLevelModel on fake data and check all
    contrast output types (z, p, stat, effect size/variance, 'all')."""
    # New API
    with InTemporaryDirectory():
        shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 16)), 3
        mask, fmri_data, design_matrices = _write_fake_fmri_data(shapes, rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_data, design_matrices=design_matrices)
        n_voxels = multi_session_model.masker_.mask_img_.get_data().sum()
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        # Every in-mask voxel must get a (reasonably small) z value.
        assert_equal(np.sum(z_image.get_data() != 0), n_voxels)
        assert_true(z_image.get_data().std() < 3.)
        # with mask
        multi_session_model = FirstLevelModel(mask_img=mask).fit(
            fmri_data, design_matrices=design_matrices)
        z_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='z_score')
        p_value = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='p_value')
        stat_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='stat')
        effect_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_size')
        variance_image = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='effect_variance')
        # z-map support must coincide with the mask; variance positive.
        assert_array_equal(z_image.get_data() == 0., load(mask).get_data() == 0.)
        assert_true(
            (variance_image.get_data()[load(mask).get_data() > 0] > .001).all())
        # 'all' must return exactly the same maps as the individual calls.
        all_images = multi_session_model.compute_contrast(
            np.eye(rk)[:2], output_type='all')
        assert_array_equal(all_images['z_score'].get_data(), z_image.get_data())
        assert_array_equal(all_images['p_value'].get_data(), p_value.get_data())
        assert_array_equal(all_images['stat'].get_data(), stat_image.get_data())
        assert_array_equal(all_images['effect_size'].get_data(), effect_image.get_data())
        assert_array_equal(all_images['effect_variance'].get_data(), variance_image.get_data())
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del (all_images,
             design_matrices,
             effect_image,
             fmri_data,
             mask,
             multi_session_model,
             n_voxels,
             p_value,
             rk,
             shapes,
             stat_image,
             variance_image,
             z_image,
             )
def test_high_level_glm_with_paths():
    """Fit a FirstLevelModel from file paths (not in-memory images) and
    check the resulting z-map affine and value range."""
    # New API
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 14)), 3
    with InTemporaryDirectory():
        mask_file, fmri_files, design_files = _write_fake_fmri_data(shapes, rk)
        multi_session_model = FirstLevelModel(mask_img=None).fit(
            fmri_files, design_matrices=design_files)
        z_image = multi_session_model.compute_contrast(np.eye(rk)[1])
        # The output image must share the input mask's affine.
        assert_array_equal(z_image.affine, load(mask_file).affine)
        assert_true(z_image.get_data().std() < 3.)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del z_image, fmri_files, multi_session_model
def test_high_level_glm_null_contrasts():
    """An all-zero contrast for one session must not change the fixed
    effects: multi-session [c, 0] equals single-session c."""
    # test that contrast computation is resilient to 0 values.
    # new API
    shapes, rk = ((7, 8, 7, 15), (7, 8, 7, 19)), 3
    mask, fmri_data, design_matrices = _generate_fake_fmri_data(shapes, rk)
    multi_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data, design_matrices=design_matrices)
    single_session_model = FirstLevelModel(mask_img=None).fit(
        fmri_data[0], design_matrices=design_matrices[0])
    z1 = multi_session_model.compute_contrast([np.eye(rk)[:1],
                                               np.zeros((1, rk))],
                                              output_type='stat')
    z2 = single_session_model.compute_contrast(np.eye(rk)[:1],
                                               output_type='stat')
    np.testing.assert_almost_equal(z1.get_data(), z2.get_data())
def test_run_glm():
    """Check run_glm on random data: OLS labels/shapes/moments, AR(1)
    label partitioning, and rejection of invalid arguments."""
    # New API
    n, p, q = 100, 80, 10
    X, Y = np.random.randn(p, q), np.random.randn(p, n)
    # Ordinary Least Squares case
    labels, results = run_glm(Y, X, 'ols')
    # OLS puts every voxel in a single group labeled 0.0.
    assert_array_equal(labels, np.zeros(n))
    assert_equal(list(results.keys()), [0.0])
    assert_equal(results[0.0].theta.shape, (q, n))
    # Estimated coefficients of white noise: mean ~0, variance ~1/p.
    assert_almost_equal(results[0.0].theta.mean(), 0, 1)
    assert_almost_equal(results[0.0].theta.var(), 1. / p, 1)
    # ar(1) case
    labels, results = run_glm(Y, X, 'ar1')
    assert_equal(len(labels), n)
    # AR(1) groups voxels by estimated autocorrelation: several groups,
    # but their column counts still sum to n.
    assert_true(len(results.keys()) > 1)
    tmp = sum([val.theta.shape[1] for val in results.values()])
    assert_equal(tmp, n)
    # non-existent noise model / mismatched design must raise
    assert_raises(ValueError, run_glm, Y, X, 'ar2')
    assert_raises(ValueError, run_glm, Y, X.T)
def test_scaling():
    """Test the scaling function: mean_scaling must center the data
    (zero column means) and return the original column means."""
    shape = (400, 10)
    u = np.random.randn(*shape)
    # Strictly positive means, well away from zero.
    mean = 100 * np.random.rand(shape[1]) + 1
    Y = u + mean
    Y_, mean_ = mean_scaling(Y)
    assert_almost_equal(Y_.mean(0), 0, 5)
    assert_almost_equal(mean_, mean, 0)
    # Input must not have been scaled down in place.
    assert_true(Y.std() > 1)
def test_fmri_inputs():
    # Test processing of FMRI inputs: images and design matrices may be
    # given as objects, file names, lists or tuples; mismatched list
    # lengths or missing t_r/slice_time_ref must raise ValueError.
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        T = func_img.shape[-1]
        # confounds with the wrong number of rows (2 != T)
        conf = pd.DataFrame([0, 0])
        des = pd.DataFrame(np.ones((T, 1)), columns=[''])
        des_fname = 'design.csv'
        des.to_csv(des_fname)
        # All accepted combinations of image object/path and design
        # object/path, scalar or list/tuple, must fit cleanly.
        for fi in func_img, FUNCFILE:
            for d in des, des_fname:
                FirstLevelModel().fit(fi, design_matrices=d)
                FirstLevelModel(mask_img=None).fit([fi], design_matrices=d)
                FirstLevelModel(mask_img=mask).fit(fi, design_matrices=[d])
                FirstLevelModel(mask_img=mask).fit([fi], design_matrices=[d])
                FirstLevelModel(mask_img=mask).fit([fi, fi], design_matrices=[d, d])
                FirstLevelModel(mask_img=None).fit((fi, fi), design_matrices=(d, d))
        # Mismatched numbers of runs and designs must raise.
        assert_raises(
            ValueError, FirstLevelModel(mask_img=None).fit, [fi, fi], d)
        assert_raises(
            ValueError, FirstLevelModel(mask_img=None).fit, fi, [d, d])
        # At least paradigms or design have to be given
        assert_raises(
            ValueError, FirstLevelModel(mask_img=None).fit, fi)
        # If paradigms are given then both tr and slice time ref were
        # required
        assert_raises(
            ValueError, FirstLevelModel(mask_img=None).fit, fi, d)
        assert_raises(
            ValueError, FirstLevelModel(mask_img=None, t_r=1.0).fit, fi, d)
        assert_raises(
            ValueError, FirstLevelModel(mask_img=None, slice_time_ref=0.).fit, fi, d)
        # confounds rows do not match n_scans
        assert_raises(
            ValueError, FirstLevelModel(mask_img=None).fit, fi, d, conf)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del fi, func_img, mask, d, des, FUNCFILE, _
def basic_paradigm():
    """Build a minimal events table for testing.

    Returns a DataFrame with columns 'trial_type', 'onset' and
    'duration': three conditions ('c0', 'c1', 'c2') with three
    one-second events each.
    """
    trial_types = ['c0', 'c0', 'c0', 'c1', 'c1', 'c1', 'c2', 'c2', 'c2']
    onset_times = [30, 70, 100, 10, 30, 90, 30, 40, 60]
    return pd.DataFrame({'trial_type': trial_types,
                         'onset': onset_times,
                         'duration': np.ones(9)})
def test_first_level_model_design_creation():
    # Test processing of FMRI inputs: the design matrix built inside
    # FirstLevelModel.fit must match one built explicitly with
    # make_first_level_design_matrix on the same frame times.
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3)
        model = model.fit(func_img, events)
        frame1, X1, names1 = check_design_matrix(model.design_matrices_[0])
        # check design computation is identical
        n_scans = func_img.get_data().shape[3]
        start_time = slice_time_ref * t_r
        end_time = (n_scans - 1 + slice_time_ref) * t_r
        frame_times = np.linspace(start_time, end_time, n_scans)
        design = make_first_level_design_matrix(frame_times, events,
                                                drift_model='polynomial', drift_order=3)
        frame2, X2, names2 = check_design_matrix(design)
        assert_array_equal(frame1, frame2)
        assert_array_equal(X1, X2)
        assert_array_equal(names1, names2)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del FUNCFILE, mask, model, func_img
def test_first_level_model_glm_computation():
    """Smoke test: fitting a FirstLevelModel (OLS, polynomial drift)
    on fake data must run without error."""
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3,
                                minimize_memory=False)
        model = model.fit(func_img, events)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del mask, FUNCFILE, func_img, model
def test_first_level_glm_computation_with_memory_caching():
    """Smoke test: fitting with joblib memory caching enabled
    (memory='nilearn_cache') must run without error."""
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # initialize FirstLevelModel with memory option enabled
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3,
                                memory='nilearn_cache', memory_level=1,
                                minimize_memory=False)
        model.fit(func_img, events)
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del mask, func_img, FUNCFILE, model
def test_first_level_model_contrast_computation():
    """Exercise compute_contrast: valid stat types and output types,
    formula-style contrasts, null contrasts, and invalid arguments."""
    with InTemporaryDirectory():
        shapes = ((7, 8, 9, 10),)
        mask, FUNCFILE, _ = _write_fake_fmri_data(shapes)
        FUNCFILE = FUNCFILE[0]
        func_img = load(FUNCFILE)
        # basic test based on basic_paradigm and glover hrf
        t_r = 10.0
        slice_time_ref = 0.
        events = basic_paradigm()
        # Ordinary Least Squares case
        model = FirstLevelModel(t_r, slice_time_ref, mask_img=mask,
                                drift_model='polynomial', drift_order=3,
                                minimize_memory=False)
        c1, c2, cnull = np.eye(7)[0], np.eye(7)[1], np.zeros(7)
        # asking for contrast before model fit gives error
        assert_raises(ValueError, model.compute_contrast, c1)
        # fit model
        model = model.fit([func_img, func_img], [events, events])
        # smoke test for different contrasts in fixed effects
        model.compute_contrast([c1, c2])
        # smoke test for same contrast in fixed effects
        model.compute_contrast([c2, c2])
        # smoke test for contrast that will be repeated
        model.compute_contrast(c2)
        model.compute_contrast(c2, 'F')
        model.compute_contrast(c2, 't', 'z_score')
        model.compute_contrast(c2, 't', 'stat')
        model.compute_contrast(c2, 't', 'p_value')
        model.compute_contrast(c2, None, 'effect_size')
        model.compute_contrast(c2, None, 'effect_variance')
        # formula should work (passing variable name directly)
        model.compute_contrast('c0')
        model.compute_contrast('c1')
        model.compute_contrast('c2')
        # smoke test for one null contrast in group
        model.compute_contrast([c2, cnull])
        # only passing null contrasts should give back a value error
        assert_raises(ValueError, model.compute_contrast, cnull)
        assert_raises(ValueError, model.compute_contrast, [cnull, cnull])
        # passing wrong parameters
        assert_raises(ValueError, model.compute_contrast, [])
        assert_raises(ValueError, model.compute_contrast, [c1, []])
        assert_raises(ValueError, model.compute_contrast, c1, '', '')
        assert_raises(ValueError, model.compute_contrast, c1, '', [])
        # Delete objects attached to files to avoid WindowsError when deleting
        # temporary directory (in Windows)
        del func_img, FUNCFILE, model
def test_first_level_models_from_bids():
    """Exercise first_level_models_from_bids on a fake BIDS dataset:
    argument validation, output lengths, and error handling for
    ambiguous filenames and missing confound/event/derivative files."""
    with InTemporaryDirectory():
        bids_path = _create_fake_bids_dataset(n_sub=10, n_ses=2,
                                              tasks=['localizer', 'main'],
                                              n_runs=[1, 3])
        # test arguments are provided correctly
        assert_raises(TypeError, first_level_models_from_bids, 2, 'main', 'MNI')
        assert_raises(ValueError, first_level_models_from_bids, 'lolo', 'main', 'MNI')
        assert_raises(TypeError, first_level_models_from_bids, bids_path, 2, 'MNI')
        assert_raises(TypeError, first_level_models_from_bids,
                      bids_path, 'main', 'MNI', model_init=[])
        # test output is as expected: one imgs/events/confounds entry
        # per returned model
        models, m_imgs, m_events, m_confounds = first_level_models_from_bids(
            bids_path, 'main', 'MNI', [('variant', 'some')])
        assert_true(len(models) == len(m_imgs))
        assert_true(len(models) == len(m_events))
        assert_true(len(models) == len(m_confounds))
        # test repeated run tag error when run tag is in filenames
        # can arise when variant or space is present and not specified
        assert_raises(ValueError, first_level_models_from_bids,
                      bids_path, 'main', 'T1w')  # variant not specified
        # test more than one ses file error when run tag is not in filenames
        # can arise when variant or space is present and not specified
        assert_raises(ValueError, first_level_models_from_bids,
                      bids_path, 'localizer', 'T1w')  # variant not specified
        # test issues with confound files: there should be exactly one
        # confound file per img (or none at all). Case when one is missing
        confound_files = get_bids_files(os.path.join(bids_path, 'derivatives'),
                                        file_tag='confounds')
        os.remove(confound_files[-1])
        assert_raises(ValueError, first_level_models_from_bids,
                      bids_path, 'main', 'MNI')
        # test issues with event files
        events_files = get_bids_files(bids_path, file_tag='events')
        os.remove(events_files[0])
        # one file missing
        assert_raises(ValueError, first_level_models_from_bids,
                      bids_path, 'main', 'MNI')
        for f in events_files[1:]:
            os.remove(f)
        # all files missing
        assert_raises(ValueError, first_level_models_from_bids,
                      bids_path, 'main', 'MNI')
        # In case different variant and spaces exist and are not selected we
        # fail and ask for more specific information
        shutil.rmtree(os.path.join(bids_path, 'derivatives'))
        # issue if no derivatives folder is present
        assert_raises(ValueError, first_level_models_from_bids,
                      bids_path, 'main', 'MNI')
        # check runs are not repeated when ses field is not used
        shutil.rmtree(bids_path)
        bids_path = _create_fake_bids_dataset(n_sub=10, n_ses=1,
                                              tasks=['localizer', 'main'],
                                              n_runs=[1, 3], no_session=True)
        # test repeated run tag error when run tag is in filenames and not ses
        # can arise when variant or space is present and not specified
        assert_raises(ValueError, first_level_models_from_bids,
                      bids_path, 'main', 'T1w')  # variant not specified
def test_first_level_models_with_no_signal_scaling():
    """
    test to ensure that the FirstLevelModel works correctly with a
    signal_scaling==False. In particular, that derived theta are correct for a
    constant design matrix with a single valued fmri image
    """
    shapes, rk = [(3, 1, 1, 2)], 1
    fmri_data = list()
    design_matrices = list()
    # A single constant regressor (column 'a' of all ones).
    design_matrices.append(pd.DataFrame(np.ones((shapes[0][-1], rk)),
                           columns=list('abcdefghijklmnopqrstuvwxyz')[:rk]))
    first_level_model = FirstLevelModel(mask_img=False, noise_model='ols', signal_scaling=False)
    # One-voxel image with constant value 6 over 2 scans.
    fmri_data.append(Nifti1Image(np.zeros((1, 1, 1, 2)) + 6, np.eye(4)))
    first_level_model.fit(fmri_data, design_matrices=design_matrices)
    # trivial test of signal_scaling value
    assert_true(first_level_model.signal_scaling is False)
    # assert that our design matrix has one constant
    assert_true(first_level_model.design_matrices_[0].equals(
        pd.DataFrame([1.0, 1.0], columns=['a'])))
    # assert that we only have one theta as there is only on voxel in our image
    assert_true(first_level_model.results_[0][0].theta.shape == (1, 1))
    # assert that the theta is equal to the one voxel value
    assert_almost_equal(first_level_model.results_[0][0].theta[0, 0], 6.0, 2)
def test_param_mask_deprecation_FirstLevelModel():
    """ Tests whether use of deprecated keyword parameter `mask`
    raises the correct warning & transfers its value to
    replacement parameter `mask_img` correctly.
    """
    deprecation_msg = (
        'The parameter "mask" will be removed in next release of Nistats. '
        'Please use the parameter "mask_img" instead.'
    )
    mask_filepath = '~/masks/mask_01.nii.gz'
    with warnings.catch_warnings(record=True) as raised_warnings:
        # keyword `mask` passed positionally-adjacent, by keyword,
        # and via the positional slot of `mask_img`.
        flm1 = FirstLevelModel(2.5,
                               1,
                               mask=mask_filepath,
                               target_shape=(2, 4, 4),
                               )
        flm2 = FirstLevelModel(t_r=2.5,
                               slice_time_ref=1,
                               mask=mask_filepath,
                               target_shape=(2, 4, 4),
                               )
        flm3 = FirstLevelModel(2.5, 0., 'glover', 'cosine', 128, 1, [0], -24,
                               mask_filepath, None, (2, 4, 4),
                               )
    # The value must land on `mask_img`, and `mask` must be gone.
    assert flm1.mask_img == mask_filepath
    assert flm2.mask_img == mask_filepath
    assert flm3.mask_img == mask_filepath
    with assert_raises(AttributeError):
        flm1.mask == mask_filepath
    with assert_raises(AttributeError):
        flm2.mask == mask_filepath
    with assert_raises(AttributeError):
        flm3.mask == mask_filepath
    # Only the two keyword uses of `mask` warn; flm3 passed it
    # positionally as `mask_img`.
    assert len(raised_warnings) == 2
    raised_param_deprecation_warnings = [
        raised_warning_ for raised_warning_
        in raised_warnings if
        str(raised_warning_.message).startswith('The parameter')
    ]
    for param_warning_ in raised_param_deprecation_warnings:
        assert str(param_warning_.message) == deprecation_msg
        assert param_warning_.category is DeprecationWarning
def test_param_mask_deprecation_first_level_models_from_bids():
    """Check that first_level_models_from_bids warns (once) about the
    deprecated `mask` parameter and not about `mask_img`."""
    deprecation_msg = (
        'The parameter "mask" will be removed in next release of Nistats. '
        'Please use the parameter "mask_img" instead.'
    )
    mask_filepath = '~/masks/mask_01.nii.gz'
    with InTemporaryDirectory():
        bids_path = _create_fake_bids_dataset(n_sub=10, n_ses=2,
                                              tasks=['localizer', 'main'],
                                              n_runs=[1, 3])
        with warnings.catch_warnings(record=True) as raised_warnings:
            # deprecated keyword: must warn
            first_level_models_from_bids(
                bids_path, 'main', 'MNI', [('variant', 'some')],
                mask=mask_filepath)
            # replacement keyword: must not warn
            first_level_models_from_bids(
                bids_path, 'main', 'MNI', [('variant', 'some')],
                mask_img=mask_filepath)
        raised_param_deprecation_warnings = [
            raised_warning_ for raised_warning_
            in raised_warnings if
            str(raised_warning_.message).startswith('The parameter')
        ]
        # Exactly one deprecation warning, from the `mask` call only.
        assert len(raised_param_deprecation_warnings) == 1
        for param_warning_ in raised_param_deprecation_warnings:
            assert str(param_warning_.message) == deprecation_msg
            assert param_warning_.category is DeprecationWarning
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
from .. import models
class AppServiceCertificateOrdersOperations(object):
"""AppServiceCertificateOrdersOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: API Version. Constant value: "2015-08-01".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-08-01"
self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """List all certificate orders in a subscription.

        List all certificate orders in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of AppServiceCertificateOrder
        :rtype:
         ~azure.mgmt.web.models.AppServiceCertificateOrderPaged[~azure.mgmt.web.models.AppServiceCertificateOrder]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL from the template.
            # Subsequent pages reuse the service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/certificateOrders'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for request tracing on the service side.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: the Paged collection drives internal_paging
        # lazily as the caller iterates.
        deserialized = models.AppServiceCertificateOrderPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.AppServiceCertificateOrderPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
def validate_purchase_information(
self, app_service_certificate_order, custom_headers=None, raw=False, **operation_config):
"""Validate information for a certificate order.
Validate information for a certificate order.
:param app_service_certificate_order: Information for a certificate
order.
:type app_service_certificate_order:
~azure.mgmt.web.models.AppServiceCertificateOrder
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/providers/Microsoft.CertificateRegistration/validateCertificateRegistrationInformation'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(app_service_certificate_order, 'AppServiceCertificateOrder')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Get certificate orders in a resource group.

        Get certificate orders in a resource group.

        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of AppServiceCertificateOrder
        :rtype:
         ~azure.mgmt.web.models.AppServiceCertificateOrderPaged[~azure.mgmt.web.models.AppServiceCertificateOrder]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL from the template.
            # Subsequent pages reuse the service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for request tracing on the service side.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: the Paged collection drives internal_paging
        # lazily as the caller iterates.
        deserialized = models.AppServiceCertificateOrderPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.AppServiceCertificateOrderPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
def get(
self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
"""Get a certificate order.
Get a certificate order.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order..
:type certificate_order_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AppServiceCertificateOrder or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.AppServiceCertificateOrder or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateOrder', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def create_or_update(
            self, resource_group_name, certificate_order_name, certificate_distinguished_name, custom_headers=None, raw=False, **operation_config):
        """Create or update a certificate purchase order.

        Create or update a certificate purchase order.

        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param certificate_order_name: Name of the certificate order.
        :type certificate_order_name: str
        :param certificate_distinguished_name: Distinguished name to use
         for the certificate order.
        :type certificate_distinguished_name:
         ~azure.mgmt.web.models.AppServiceCertificateOrder
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns
         AppServiceCertificateOrder or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.web.models.AppServiceCertificateOrder]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrder')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Turn the terminal response into a model instance (or raise).
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('AppServiceCertificateOrder', response)
            if response.status_code == 201:
                deserialized = self._deserialize('AppServiceCertificateOrder', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True short-circuits polling: send once and surface the
        # first response directly.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def delete(
self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
"""Delete an existing certificate order.
Delete an existing certificate order.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def update(
self, resource_group_name, certificate_order_name, certificate_distinguished_name, custom_headers=None, raw=False, **operation_config):
"""Create or update a certificate purchase order.
Create or update a certificate purchase order.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param certificate_distinguished_name: Distinguished name to to use
for the certificate order.
:type certificate_distinguished_name:
~azure.mgmt.web.models.AppServiceCertificateOrderPatchResource
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AppServiceCertificateOrder or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.web.models.AppServiceCertificateOrder or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(certificate_distinguished_name, 'AppServiceCertificateOrderPatchResource')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateOrder', response)
if response.status_code == 201:
deserialized = self._deserialize('AppServiceCertificateOrder', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def list_certificates(
            self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
        """List all certificates associated with a certificate order.

        List all certificates associated with a certificate order.

        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param certificate_order_name: Name of the certificate order.
        :type certificate_order_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of AppServiceCertificateResource
        :rtype:
         ~azure.mgmt.web.models.AppServiceCertificateResourcePaged[~azure.mgmt.web.models.AppServiceCertificateResource]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the collection URL from the template.
            # Subsequent pages reuse the service-provided next_link verbatim.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for request tracing on the service side.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response: the Paged collection drives internal_paging
        # lazily as the caller iterates.
        deserialized = models.AppServiceCertificateResourcePaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.AppServiceCertificateResourcePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
def get_certificate(
self, resource_group_name, certificate_order_name, name, custom_headers=None, raw=False, **operation_config):
"""Get the certificate associated with a certificate order.
Get the certificate associated with a certificate order.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: AppServiceCertificateResource or ClientRawResponse if
raw=true
:rtype: ~azure.mgmt.web.models.AppServiceCertificateResource or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AppServiceCertificateResource', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def create_or_update_certificate(
            self, resource_group_name, certificate_order_name, name, key_vault_certificate, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a certificate and associates with key vault secret.

        Creates or updates a certificate and associates with key vault secret.

        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param certificate_order_name: Name of the certificate order.
        :type certificate_order_name: str
        :param name: Name of the certificate.
        :type name: str
        :param key_vault_certificate: Key vault certificate resource Id.
        :type key_vault_certificate:
         ~azure.mgmt.web.models.AppServiceCertificateResource
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :return: An instance of AzureOperationPoller that returns
         AppServiceCertificateResource or ClientRawResponse if raw=true
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.web.models.AppServiceCertificateResource]
         or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for request tracing on the service side.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(key_vault_certificate, 'AppServiceCertificateResource')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running operation.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the status URL handed back by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Turn the terminal response into a model instance (or raise).
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('AppServiceCertificateResource', response)
            if response.status_code == 201:
                deserialized = self._deserialize('AppServiceCertificateResource', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        # raw=True short-circuits polling: send once and surface the
        # first response directly.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
def delete_certificate(
self, resource_group_name, certificate_order_name, name, custom_headers=None, raw=False, **operation_config):
"""Delete the certificate associated with a certificate order.
Delete the certificate associated with a certificate order.
:param resource_group_name: Name of the resource group to which the
resource belongs.
:type resource_group_name: str
:param certificate_order_name: Name of the certificate order.
:type certificate_order_name: str
:param name: Name of the certificate.
:type name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def update_certificate(
        self, resource_group_name, certificate_order_name, name, key_vault_certificate, custom_headers=None, raw=False, **operation_config):
    """Creates or updates a certificate and associates with key vault secret.

    Creates or updates a certificate and associates with key vault secret.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the certificate.
    :type name: str
    :param key_vault_certificate: Key vault certificate resource Id.
    :type key_vault_certificate:
     ~azure.mgmt.web.models.AppServiceCertificatePatchResource
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: AppServiceCertificateResource or ClientRawResponse if
     raw=true
    :rtype: ~azure.mgmt.web.models.AppServiceCertificateResource or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Expand the templated resource path with the serialized arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/certificates/{name}',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Query string carries only the service API version.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Standard JSON headers plus optional client-request-id / language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the PATCH payload and issue the request.
    payload = self._serialize.body(key_vault_certificate, 'AppServiceCertificatePatchResource')
    response = self._client.send(
        self._client.patch(url, query), headers, payload, **operation_config)

    # 200 (updated) and 201 (created) are the documented success codes.
    if response.status_code not in (200, 201):
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    deserialized = None
    if response.status_code in (200, 201):
        deserialized = self._deserialize('AppServiceCertificateResource', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def reissue(
        self, resource_group_name, certificate_order_name, reissue_certificate_order_request, custom_headers=None, raw=False, **operation_config):
    """Reissue an existing certificate order.

    Sends a POST to the order's ``/reissue`` endpoint with the serialized
    reissue parameters; the service returns no body on success.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param reissue_certificate_order_request: Parameters for the reissue.
    :type reissue_certificate_order_request:
     ~azure.mgmt.web.models.ReissueCertificateOrderRequest
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/reissue'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (API version only)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id so the request can be traced server-side.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(reissue_certificate_order_request, 'ReissueCertificateOrderRequest')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    # 204 (No Content) is the only documented success status.
    if response.status_code not in [204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def renew(
        self, resource_group_name, certificate_order_name, renew_certificate_order_request, custom_headers=None, raw=False, **operation_config):
    """Renew an existing certificate order.

    Sends a POST to the order's ``/renew`` endpoint with the serialized
    renew parameters; the service returns no body on success.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param renew_certificate_order_request: Renew parameters
    :type renew_certificate_order_request:
     ~azure.mgmt.web.models.RenewCertificateOrderRequest
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/renew'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (API version only)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id so the request can be traced server-side.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(renew_certificate_order_request, 'RenewCertificateOrderRequest')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    # 204 (No Content) is the only documented success status.
    if response.status_code not in [204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def resend_email(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """Resend certificate email.

    Resend certificate email.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Expand the templated resource path with the serialized arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendEmail',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Query string carries only the service API version.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Standard JSON headers plus optional client-request-id / language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # POST with no body; 204 (No Content) is the sole success status.
    response = self._client.send(
        self._client.post(url, query), headers, **operation_config)
    if response.status_code != 204:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    if raw:
        return ClientRawResponse(None, response)
def resend_request_emails(
        self, resource_group_name, certificate_order_name, name=None, custom_headers=None, raw=False, **operation_config):
    """Resend domain verification request emails for this certificate order.

    Resend domain verification ownership email containing steps on how to
    verify a domain for a given certificate order.  (The original docstring
    incorrectly said "Verify domain ownership"; this method POSTs to
    ``/resendRequestEmails``.)

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param name: Name of the object.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # The flat `name` argument is wrapped into the model the service expects.
    name_identifier = models.NameIdentifier(name=name)

    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/resendRequestEmails'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(name_identifier, 'NameIdentifier')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    # 204 (No Content) is the only documented success status.
    if response.status_code not in [204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def retrieve_site_seal(
        self, resource_group_name, certificate_order_name, light_theme=None, locale=None, custom_headers=None, raw=False, **operation_config):
    """Retrieve the site seal for this certificate order.

    Obtains the site seal information for an issued certificate; a site
    seal is a graphic the certificate purchaser can embed on their web
    site.  (The original docstring incorrectly said "Verify domain
    ownership"; this method POSTs to ``/retrieveSiteSeal`` and returns a
    SiteSeal.)

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param light_theme: If <code>true</code> use the light color theme for
     site seal; otherwise, use the default color theme.
    :type light_theme: bool
    :param locale: Locale of site seal.
    :type locale: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SiteSeal or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.web.models.SiteSeal or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Flat keyword arguments are wrapped into the request model.
    site_seal_request = models.SiteSealRequest(light_theme=light_theme, locale=locale)

    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/retrieveSiteSeal'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(site_seal_request, 'SiteSealRequest')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('SiteSeal', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
def verify_domain_ownership(
        self, resource_group_name, certificate_order_name, custom_headers=None, raw=False, **operation_config):
    """Verify domain ownership for this certificate order.

    Sends a bodiless POST to the order's ``/verifyDomainOwnership``
    endpoint; the service returns no body on success.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param certificate_order_name: Name of the certificate order.
    :type certificate_order_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{certificateOrderName}/verifyDomainOwnership'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'certificateOrderName': self._serialize.url("certificate_order_name", certificate_order_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (API version only)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id so the request can be traced server-side.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request (no body)
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    # 204 (No Content) is the only documented success status.
    if response.status_code not in [204]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    if raw:
        client_raw_response = ClientRawResponse(None, response)
        return client_raw_response
def retrieve_certificate_actions(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Retrieve the list of certificate actions.

    Retrieve the list of certificate actions.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param name: Name of the certificate order.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.mgmt.web.models.CertificateOrderAction] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Expand the templated resource path with the serialized arguments.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveCertificateActions',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Query string carries only the service API version.
    query = {'api-version': self._serialize.query("self.api_version", self.api_version, 'str')}

    # Standard JSON headers plus optional client-request-id / language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Bodiless POST; only 200 is a success.
    response = self._client.send(
        self._client.post(url, query), headers, **operation_config)
    if response.status_code != 200:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Deserialize the JSON array of actions.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('[CertificateOrderAction]', response)

    if raw:
        return ClientRawResponse(deserialized, response)
    return deserialized
def retrieve_certificate_email_history(
        self, resource_group_name, name, custom_headers=None, raw=False, **operation_config):
    """Retrieve email history.

    Sends a bodiless POST to the order's ``/retrieveEmailHistory``
    endpoint and deserializes the JSON array of CertificateEmail items.

    :param resource_group_name: Name of the resource group to which the
     resource belongs.
    :type resource_group_name: str
    :param name: Name of the certificate order.
    :type name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.mgmt.web.models.CertificateEmail] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.CertificateRegistration/certificateOrders/{name}/retrieveEmailHistory'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters (API version only)
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id so the request can be traced server-side.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct and send request (no body)
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, **operation_config)

    if response.status_code not in [200]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None
    if response.status_code == 200:
        # '[CertificateEmail]' tells msrest to deserialize a JSON array.
        deserialized = self._deserialize('[CertificateEmail]', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response
    return deserialized
| |
"""ASF Parser Plugin."""
#
# Copyright (c) 2007 Michael van Tellingen <michaelvantellingen@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Built-in modules
import datetime
__all__ = ['Parser']
# Project modules
import videoparser.plugins as plugins
import videoparser.streams as streams
# Only implement required information to retrieve video and audio information
# Maps ASF object GUIDs (as uppercase hex strings with hyphens, except
# ASF_Reserved_1 which appears with a lowercase group in the spec) to the
# symbolic object names used throughout the parser.  Only the objects this
# parser needs in order to extract video/audio information are listed.
guid_list = {
    'D2D0A440-E307-11D2-97F0-00A0C95EA850':
        'ASF_Extended_Content_Description_Object',
    '75B22630-668E-11CF-A6D9-00AA0062CE6C': 'ASF_Header_Object',
    '75B22633-668E-11CF-A6D9-00AA0062CE6C': 'ASF_Content_Description_Object',
    '8CABDCA1-A947-11CF-8EE4-00C00C205365': 'ASF_File_Properties_Object',
    '5FBF03B5-A92E-11CF-8EE3-00C00C205365': 'ASF_Header_Extension_Object',
    '86D15240-311D-11D0-A3A4-00A0C90348F6': 'ASF_Codec_List_Object',
    'B7DC0791-A9B7-11CF-8EE6-00C00C205365': 'ASF_Stream_Properties_Object',
    '7BF875CE-468D-11D1-8D82-006097C9A2B2':
        'ASF_Stream_Bitrate_Properties_Object',
    'F8699E40-5B4D-11CF-A8FD-00805F5C442B': 'ASF_Audio_Media',
    'BC19EFC0-5B4D-11CF-A8FD-00805F5C442B': 'ASF_Video_Media',
    'BFC3CD50-618F-11CF-8BB2-00AA00B4E220': 'ASF_Audio_Spread',
    '20FB5700-5B55-11CF-A8FD-00805F5C442B': 'ASF_No_Error_Correction',
    '7C4346A9-EFE0-4BFC-B229-393EDE415C85': 'ASF_Language_List_Object',
    'ABD3D211-A9BA-11cf-8EE6-00C00C205365': 'ASF_Reserved_1',
    'C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA': 'ASF_Metadata_Object',
    '14E6A5CB-C672-4332-8399-A96952065B5A':
        'ASF_Extended_Stream_Properties_Object',
    'D6E229DF-35DA-11D1-9034-00A0C90349BE': 'ASF_Index_Parameters_Object',
    'D4FED15B-88D3-454F-81F0-ED5C45999E24': 'ASF_Stream_Prioritization_Object',
    '1806D474-CADF-4509-A4BA-9AABCB96AAE8': 'ASF_Padding_Object',
}
class Parser(plugins.BaseParser):
_endianess = streams.endian.little
_file_types = ['wmv']
def __init__(self):
    # No parser-specific state; only base-class initialization is needed.
    plugins.BaseParser.__init__(self)
def parse(self, filename, video):
    """Parse *filename* as an ASF container and populate *video*.

    Returns True on success, False when the file is not ASF or its
    header is malformed.
    """
    src = streams.factory.create_filestream(filename,
                                            endianess=self._endianess)

    # A valid ASF file must open with the Header Object GUID.
    leading_guid = src.read_guid()
    if guid_list.get(leading_guid) != 'ASF_Header_Object':
        return False

    try:
        parsed_header = self.parse_header(src)
    except AssertionError:
        # Malformed header or unrecognized top-level object.
        return False

    self.extract_information(parsed_header, video)
    return True
def extract_information(self, header, video):
    """Copy stream/container details from a parsed *header* into *video*.

    First pass creates audio/video streams from StreamProperties objects;
    second pass applies the file-wide duration (FileProperties) and the
    per-stream framerate (ExtendedStreamProperties inside the
    HeaderExtension).  Returns *video*.
    """
    # Fix: removed the unused local `framerates = {}` and the
    # commented-out debug prints from the original.
    video.set_container('ASF')

    # Pass 1: materialize streams and their media-type details.
    for object in header.objects:
        if isinstance(object, self.StreamProperties):
            stream = video.get_stream(object.index)
            type_data = object.type_data

            if object.type == 'ASF_Audio_Media':
                if not stream:
                    stream = video.new_audio_stream(object.index)
                stream.set_channels(type_data.channels)
                stream.set_sample_rate(type_data.sample_rate)
                # Map the numeric codec id to a name when known,
                # otherwise fall back to the raw id.
                stream.set_codec(type_data.codec_ids.get(
                    type_data.codec_id, type_data.codec_id))
                stream.set_bit_per_sample(type_data.bits_per_sample)

            if object.type == 'ASF_Video_Media':
                if not stream:
                    stream = video.new_video_stream(object.index)
                stream.set_width(type_data.width)
                stream.set_height(type_data.height)
                stream.set_codec(type_data.format_data.compression_id)

    # Pass 2: duration and framerate, which live in other header objects.
    for object in header.objects:
        if isinstance(object, self.FileProperties):
            for stream in video.video_streams:
                stream.set_duration(seconds=object.play_duration.seconds,
                                    microseconds= \
                                    object.play_duration.microseconds)

        # Extract additional information from the HeaderExtension
        if isinstance(object, self.HeaderExtension):
            for sub_object in object.extension_data:
                if isinstance(sub_object, self.ExtendedStreamProperties):
                    # Framerate (required for video): avg_time_per_frame
                    # is in 100 ns units, so /1e7 yields seconds/frame.
                    stream = video.get_stream(sub_object.stream_number)
                    if stream.type == 'Video':
                        stream.set_framerate(1 / (
                            sub_object.avg_time_per_frame / 10000000.0))
    return video
def parse_header(self, stream):
# Read the header information
header = self.Header()
header.size = stream.read_uint64()
header.num_objects = stream.read_uint32()
header.reserved_1 = stream.read_uint8()
header.reserved_2 = stream.read_uint8()
header.objects = []
if header.reserved_2 != 0x02:
raise AssertionError('Reserved2 in Header Object should be 0x02')
# Loop through all objects contained in the header
for i in range(0, header.num_objects):
guid = stream.read_guid()
size = stream.read_uint64()
obj = None
try:
object_type = guid_list[guid]
except:
# Unrecognized object, skip over it
raise AssertionError("Unregognized object: %s" % guid)
stream.skip(size - 24)
continue
data = stream.read_subsegment(size - 24)
if object_type == 'ASF_Content_Description_Object':
obj = 'ASF_Content_Description_Object (TODO)'
elif object_type == 'ASF_Extended_Content_Description_Object':
obj = 'ASF_Extended_Content_Description_Object (TODO)'
elif object_type == 'ASF_File_Properties_Object':
obj = self.parse_file_properties(data)
elif object_type == 'ASF_Header_Extension_Object':
obj = self.parse_header_extension(data)
elif object_type == 'ASF_Codec_List_Object':
obj = self.parse_codec_list(data)
elif object_type == 'ASF_Stream_Properties_Object':
obj = self.parse_stream_properties(data)
elif object_type == 'ASF_Stream_Bitrate_Properties_Object':
obj = self.parse_stream_bitrate_properties(data)
else:
print "Warning: unhandled object: %s" % object_type
header.objects.append(obj)
data.close()
#print guid_list[guid], size
return header
# mandatory, one only
def parse_file_properties(self, data):
    """Parse the mandatory ASF_File_Properties_Object from *data*.

    Returns a FileProperties instance with timing, packet-size and
    bitrate details for the whole file.
    """
    props = self.FileProperties()
    props.id = data.read_guid()
    props.size = data.read_uint64()
    props.create_date = data.read_timestamp_win()
    props.packet_count = data.read_uint64()
    # Durations are read as counts of 100 ns units; dividing by 10
    # converts them to the microseconds timedelta expects.
    props.play_duration = datetime.timedelta(
        microseconds=data.read_uint64() / 10)
    props.send_duration = datetime.timedelta(
        microseconds=data.read_uint64() / 10)
    props.preroll = data.read_uint64()

    # Flags: bit 0 = broadcast, bit 1 = seekable, remainder reserved.
    flag_bits = data.read_uint32()
    props.broadcast_flag = flag_bits & 0x01
    props.seekable_flag = (flag_bits >> 1) & 0x01
    props.reserved = flag_bits >> 2

    props.min_packet_size = data.read_uint32()
    props.max_packet_size = data.read_uint32()
    props.max_bitrate = data.read_uint32()

    return props
# mandatory, one only
    def parse_stream_properties(self, data):
        """Parse an ASF_Stream_Properties_Object payload into StreamProperties.

        Reads the fixed header fields, then decodes the type-specific data
        for audio (WAVEFORMATEX) and video (size + BITMAPINFOHEADER)
        streams; any other stream type leaves type_data as None.
        """
        stream = self.StreamProperties()
        stream.type = guid_list[data.read_guid()]
        stream.ecc_type = guid_list[data.read_guid()]
        stream.time_offset = data.read_uint64()
        stream.type_length = data.read_uint32()
        stream.ecc_length = data.read_uint32()
        flags = data.read(2)
        # Low 7 bits of the first flags byte hold the stream number.
        stream.index = ord(flags[0]) & 0x7f
        stream.reserved = data.read(4)
        type_data = data.read_subsegment(stream.type_length)
        if stream.type == 'ASF_Audio_Media':
            obj = type_data.read_waveformatex()
        elif stream.type == 'ASF_Video_Media':
            obj = self.VideoMedia()
            obj.width = type_data.read_uint32()
            obj.height = type_data.read_uint32()
            obj.reserved_flags = type_data.read_byte()
            obj.format_data_size = type_data.read_uint16()
            obj.format_data = type_data.read_bitmapinfoheader()
        else:
            obj = None
        stream.type_data = obj
        # ECC payload is kept only as a repr'd byte string for debugging.
        stream.ecc_data = repr(data.read(stream.ecc_length))
        return stream
# mandatory, one only
def parse_header_extension(self, data):
header = self.HeaderExtension()
header.reserved_1 = data.read_guid() # should be ASF_Reserved_1
header.reserved_2 = data.read_uint16() # should be 6
header.size = data.read_uint32()
header.extension_data = []
# Check reserved_1
bytes = header.size
while bytes > 0:
object_id = data.read_guid()
object_size = data.read_uint64()
bytes -= object_size
if object_size == 0:
continue
sub_data = data.read_subsegment(object_size - 24)
try:
object_type = guid_list[object_id]
except KeyError:
# Skip unknown guid's, since authors are allowed to create
# there own
#
#print "WARNING: object_id '%s' not found in guid_list" % \
# object_id
#header.extension_data.append(object_id)
continue
if object_type == 'ASF_Language_List_Object':
obj = self.parse_language_list(sub_data)
elif object_type == 'ASF_Metadata_Object':
obj = self.parse_metadata(sub_data)
elif object_type == 'ASF_Extended_Stream_Properties_Object':
obj = self.parse_extended_stream_properties(sub_data)
elif object_type == 'ASF_Stream_Prioritization_Object':
obj = self.parse_stream_prioritization(sub_data)
elif object_type == 'ASF_Padding_Object':
# Ignore the padding object, since it contains no information
continue
elif object_type == 'ASF_Index_Parameters_Object':
obj = 'ASF_Index_Parameters_Object (TODO)'
else:
raise AssertionError("object_type '%s' not processed in " +
"header_extension" % object_type)
#if obj is None:
# raise AssertionError("obj is None: %s" % object_type)
header.extension_data.append(obj)
return header
    def parse_language_list(self, data):
        """Parse an ASF_Language_List_Object into a LanguageList of ids."""
        obj = self.LanguageList()
        obj.num_records = data.read_uint16()
        obj.records = []
        for i in range(0, obj.num_records):
            # Length is in bytes; ids are wide (2-byte) characters, so /2.
            language_id_length = data.read_uint8()
            language_id = data.read_wchars(language_id_length / 2)
            obj.records.append(language_id)
        return obj
    def parse_metadata(self, data):
        # TODO: ASF_Metadata_Object parsing is not implemented yet; the
        # payload is read but discarded by the caller.
        return None
    def parse_extended_stream_properties(self, data):
        """Parse an ASF_Extended_Stream_Properties_Object payload.

        Only the fixed-size fields are decoded; the variable-length stream
        names, payload extension systems and embedded stream properties
        object are left as None (TODO).
        """
        obj = self.ExtendedStreamProperties()
        obj.start_time = data.read_uint64()
        obj.end_time = data.read_uint64()
        obj.data_bitrate = data.read_uint32()
        obj.buffer_size = data.read_uint32()
        obj.initial_buffer_fullness = data.read_uint32()
        obj.alt_data_bitrate = data.read_uint32()
        obj.alt_buffer_size = data.read_uint32()
        obj.alt_initial_buffer_fullness = data.read_uint32()
        obj.max_object_size = data.read_uint32()
        # Parse flags
        flags = data.read_uint32()
        obj.reliable_flag = flags & 0x01
        obj.seekable_flag = (flags >> 1) & 0x01
        obj.no_cleanpoints_flag = (flags >> 2) & 0x01
        obj.resend_cleanpoints_flag = (flags >> 3) & 0x01
        obj.reserved_flags = flags >> 4
        obj.stream_number = data.read_uint16()
        obj.stream_language_id = data.read_uint16()
        obj.avg_time_per_frame = data.read_uint64()
        obj.stream_name_length = data.read_uint16()
        obj.payload_extension_length = data.read_uint16()
        # Variable-length trailing sections are not parsed yet.
        obj.stream_names = None
        obj.payload_extensions = None
        obj.stream_properties_object = None
        return obj
    def parse_stream_prioritization(self, data):
        # TODO: ASF_Stream_Prioritization_Object parsing is not implemented;
        # the payload is read but discarded by the caller.
        return None
# Optional, one only
    def parse_codec_list(self, data):
        """Parse an ASF_Codec_List_Object into a CodecList.

        Each entry carries a null-terminated wide-char name/description and
        a raw information blob (kept repr'd for debugging).
        """
        codeclist = self.CodecList()
        codeclist.reserved = data.read_guid()
        codeclist.num_codecs = data.read_uint32()
        codeclist.codec_entries = []
        for i in range(0, codeclist.num_codecs):
            entry = self.CodecEntry()
            entry.type = data.read_uint16()
            entry.name_length = data.read_uint16()
            entry.name = data.read_wchars(entry.name_length,
                                          null_terminated=True)
            entry.description_length = data.read_uint16()
            entry.description = data.read_wchars(entry.description_length,
                                                 null_terminated=True)
            entry.information_length = data.read_uint16()
            entry.information = repr(data.read(entry.information_length))
            codeclist.codec_entries.append(entry)
        return codeclist
# Optional but recommended, one only
    def parse_stream_bitrate_properties(self, data):
        """Parse an ASF_Stream_Bitrate_Properties_Object payload."""
        bitratelist = self.StreamBitrateProperties()
        bitratelist.num_records = data.read_uint16()
        bitratelist.records = []
        for i in range(0, bitratelist.num_records):
            entry = self.StreamBitrateRecord()
            flags = data.read(2)
            # Low 7 bits of the first byte: stream number; the remaining
            # 9 bits are reserved and preserved verbatim.
            entry.stream_index = ord(flags[0]) & 0x7f
            entry.reserved = chr(ord(flags[0]) & 0x80) + flags[1]
            entry.avg_bitrate = data.read_uint32()
            bitratelist.records.append(entry)
        return bitratelist
#
# Objects to represent internal structure of the ASF File for debuging
#
class Structure(object):
def repr_childs(self, obj):
buffer = ""
for entry in obj:
buffer += "\n".join([" %s" % line for line
in repr(entry).split('\n')])
buffer += "\n"
return buffer
    class Header(Structure):
        """Debug view of the top-level ASF_Header_Object."""
        __slots__ = ['size', 'num_objects', 'reserved_1', 'reserved_2',
                     'objects']
        def __repr__(self):
            buffer = "ASF_Header_Object Structure: \n"
            buffer += " %-30s: %s\n" % ('Object Size', self.size)
            buffer += " %-30s: %s\n" % ('Number of Header Objects',
                                        self.num_objects)
            buffer += " %-30s: %s\n" % ('Reserved1', repr(self.reserved_1))
            buffer += " %-30s: %s\n" % ('Reserved2', repr(self.reserved_2))
            # Child objects are indented one level via the base class.
            buffer += self.repr_childs(self.objects)
            return buffer
    class VideoMedia(Structure):
        """Debug view of the type-specific data of an ASF video stream."""
        __slots__ = ['width', 'height', 'reserved_flags', 'format_data_size',
                     'format_data']
        def __repr__(self):
            buffer = "ASF_Video_Media Structure: \n"
            buffer += " %-30s: %s\n" % ('Encoded Image Width', self.width)
            buffer += " %-30s: %s\n" % ('Encoded Image Height', self.height)
            buffer += " %-30s: %s\n" % ('Reserved Flags',
                                        repr(self.reserved_flags))
            buffer += " %-30s: %s\n" % ('Format Data Size',
                                        self.format_data_size)
            buffer += " %-30s\n" % ('Format Data')
            # format_data is a single child (BITMAPINFOHEADER-like object).
            buffer += self.repr_childs([self.format_data])
            return buffer
    class LanguageList(Structure):
        """Debug view of ASF_Language_List_Object (list of language ids)."""
        __slots__ = ['num_records', 'records']
        def __repr__(self):
            buffer = "ASF_Language_List_Object: \n"
            buffer += " %-30s: %s\n" % ('Language ID Records Count',
                                        self.num_records)
            # The whole records list is rendered as a single child.
            buffer += self.repr_childs([self.records])
            return buffer
class FileProperties(Structure):
__slots__ = ['id', 'size', 'create_data', 'packet_count',
'play_duration', 'send_duration', 'preroll',
'broadcast_flag', 'seekable_flag', 'reserved',
'min_packet_size', 'max_packet_size', 'max_bitrate']
def __repr__(self):
buffer = "FileProperties Structure: \n"
buffer += " %-30s: %s\n" % ('File ID', self.id)
buffer += " %-30s: %s\n" % ('File Size', self.size)
buffer += " %-30s: %s\n" % ('Creation Date', self.create_date)
buffer += " %-30s: %s\n" % ('Data Packets Count',
self.packet_count)
buffer += " %-30s: %s\n" % ('Play Duration', self.play_duration)
buffer += " %-30s: %s\n" % ('Send Duration', self.send_duration)
buffer += " %-30s: %s\n" % ('Preroll', repr(self.preroll))
buffer += " %-30s: %s\n" % ('Broadcast Flag', self.broadcast_flag)
buffer += " %-30s: %s\n" % ('Seekable Flag', self.seekable_flag)
buffer += " %-30s: %s\n" % ('Reserved', repr(self.reserved))
buffer += " %-30s: %s\n" % ('Minimum Data Packet Size',
self.min_packet_size)
buffer += " %-30s: %s\n" % ('Maximum Data Packet Size',
self.max_packet_size)
buffer += " %-30s: %s\n" % ('Maximum Bitrate',
self.max_bitrate)
return buffer
class HeaderExtension(Structure):
def __repr__(self):
buffer = "HeaderExtension Structure: \n"
buffer += " %-30s: %s\n" % ('Reserved_1', self.reserved_1)
buffer += " %-30s: %s\n" % ('Reserved_2', self.reserved_2)
buffer += " %-30s: %s\n" % ('Header Extension Data Size',
self.size)
buffer += " %-30s\n" % ('Header Extension Data')
buffer += self.repr_childs(self.extension_data)
return buffer
    class StreamProperties(Structure):
        """Debug view of ASF_Stream_Properties_Object fields."""
        __slots__ = ['type', 'ecc_type', 'time_offset', 'type_length',
                     'ecc_length', 'index', 'reserved', 'type_data',
                     'ecc_data']
        def __repr__(self):
            buffer = "StreamProperties Structure: \n"
            buffer += " %-30s: %s\n" % ('Stream Type', self.type)
            buffer += " %-30s: %s\n" % ('Error Correction Type', self.ecc_type)
            buffer += " %-30s: %s\n" % ('Time Offset', self.time_offset)
            buffer += " %-30s: %s\n" % ('Type-Specific Data Length',
                                        self.type_length)
            buffer += " %-30s: %s\n" % ('Error Correction Data Length',
                                        self.ecc_length)
            buffer += " %-30s: %s\n" % ('Stream Index', self.index)
            buffer += " %-30s: %s\n" % ('Reserved', repr(self.reserved))
            buffer += " %-30s\n" % ('Type-Specific Data')
            # type_data may be None for unrecognised stream types.
            buffer += self.repr_childs([self.type_data])
            buffer += " %-30s: %s\n" % ('Error Correction Data', self.ecc_data)
            return buffer
    class StreamBitrateRecord(Structure):
        """Debug view of one ASF stream bitrate record."""
        __slots__ = ['stream_index', 'reserved', 'avg_bitrate']
        def __repr__(self):
            buffer = "StreamBitrateRecord Structure: \n"
            buffer += " %-30s: %s\n" % ('Stream number', self.stream_index)
            buffer += " %-30s: %r\n" % ('Reserved', self.reserved)
            buffer += " %-30s: %s\n" % ('Average Bitrate', self.avg_bitrate)
            return buffer
class StreamBitrateProperties(Structure):
__slots__ = ['num_records', 'records']
def __repr__(self):
buffer = "StreamBitrateProperties Structure: \n"
buffer += " %-30s: %s\n" % ('Bitrate Entries Count',
self.num_records)
buffer += " %-30s\n" % ('Codec Entries')
buffer += self.repr_childs(self.records)
return buffer
    class CodecList(Structure):
        """Debug view of ASF_Codec_List_Object."""
        __slots__ = ['reserved', 'num_codecs', 'codec_entries']
        def __repr__(self):
            buffer = "CodecList Structure: \n"
            buffer += " %-30s: %s\n" % ('Reserved', self.reserved)
            buffer += " %-30s: %s\n" % ('Codec Entries Count', self.num_codecs)
            buffer += " %-30s\n" % ('Codec Entries')
            buffer += self.repr_childs(self.codec_entries)
            return buffer
class CodecEntry(Structure):
__slots__ = ['type', 'name_length', 'name', 'description_length',
'description', 'information_length', 'information']
def __repr__(self):
buffer = "CodecEntry Structure: \n"
buffer += " %-30s: %s\n" % ('Type', self.type)
buffer += " %-30s: %s\n" % ('Codec Name Length', self.name_length)
buffer += " %-30s: %s\n" % ('Codecx Name', repr(self.name))
buffer += " %-30s: %s\n" % ('Codec Description Length',
self.description_length)
buffer += " %-30s: %s\n" % ('Codec Description',
repr(self.description))
buffer += " %-30s: %s\n" % ('Codec Information Length',
self.information_length)
buffer += " %-30s: %s\n" % ('Codec Information', self.information)
return buffer
    class ExtendedStreamProperties(Structure):
        """Debug view of ASF_Extended_Stream_Properties_Object fields."""
        __slots__ = ['start_time', 'end_time', 'data_bitrate', 'buffer_size',
                     'initial_buffer_fullness', 'alt_data_bitrate',
                     'alt_buffer_size', 'alt_initial_buffer_fullness',
                     'max_object_size', 'reliable_flag', 'seekable_flag',
                     'no_cleanpoints_flag', 'resend_cleanpoints_flag',
                     'reserved_flags', 'stream_number', 'stream_language_id',
                     'avg_time_per_frame', 'stream_name_length',
                     'payload_extension_length', 'stream_names',
                     'payload_extensions', 'stream_properties_object']
        def __repr__(self):
            buffer = "ExtendedStreamProperties Structure: \n"
            buffer += " %-30s: %s\n" % ('Start Time', self.start_time)
            buffer += " %-30s: %s\n" % ('End Time', self.end_time)
            buffer += " %-30s: %s\n" % ('Data Bitrate', self.data_bitrate)
            buffer += " %-30s: %s\n" % ('Buffer Size', self.buffer_size)
            buffer += " %-30s: %s\n" % ('Initial Buffer Fullness',
                                        self.initial_buffer_fullness)
            buffer += " %-30s: %s\n" % ('Alternate Data Bitrate',
                                        self.alt_data_bitrate)
            buffer += " %-30s: %s\n" % ('Alternate Buffer Size',
                                        self.alt_buffer_size)
            buffer += " %-30s: %s\n" % ('Alternate Initial Buffer Fullness',
                                        self.alt_initial_buffer_fullness)
            buffer += " %-30s: %s\n" % ('Maximum Object Size',
                                        self.max_object_size)
            buffer += " %-30s: %s\n" % ('Reliable Flag', self.reliable_flag)
            buffer += " %-30s: %s\n" % ('Seekable Flag',
                                        self.seekable_flag)
            buffer += " %-30s: %s\n" % ('No Cleanpoints Flag',
                                        self.no_cleanpoints_flag)
            buffer += " %-30s: %s\n" % ('Resend Live Cleanpoints Flag',
                                        self.resend_cleanpoints_flag)
            buffer += " %-30s: %s\n" % ('Reserved Flags', self.reserved_flags)
            buffer += " %-30s: %s\n" % ('Stream Number', self.stream_number)
            buffer += " %-30s: %s\n" % ('Stream Language ID Index',
                                        self.stream_language_id)
            buffer += " %-30s: %s\n" % ('Average Time Per Frame',
                                        self.avg_time_per_frame)
            buffer += " %-30s: %s\n" % ('Stream Name Count',
                                        self.stream_name_length)
            buffer += " %-30s: %s\n" % ('Payload Extension System Count',
                                        self.payload_extension_length)
            # The next three are placeholders (parser leaves them as None).
            buffer += " %-30s: %s\n" % ('Stream Names', self.stream_names)
            buffer += " %-30s: %s\n" % ('Payload Extension Systems',
                                        self.payload_extensions)
            buffer += " %-30s: %s\n" % ('Stream Properties Object',
                                        self.stream_properties_object)
            return buffer
| |
import asyncio
import atexit
import gc
import logging
import os
import signal
import sys
import warnings
from contextlib import suppress
import click
from tlz import valmap
from tornado.ioloop import IOLoop, TimeoutError
import dask
from dask.system import CPU_COUNT
from distributed import Nanny
from distributed.cli.utils import check_python_3, install_signal_handlers
from distributed.comm import get_address_host_port
from distributed.deploy.utils import nprocesses_nthreads
from distributed.preloading import validate_preload_argv
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from distributed.utils import import_term
logger = logging.getLogger("distributed.dask_worker")
pem_file_option_type = click.Path(exists=True, resolve_path=True)
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.argument("scheduler", type=str, required=False)
@click.option(
"--tls-ca-file",
type=pem_file_option_type,
default=None,
help="CA cert(s) file for TLS (in PEM format)",
)
@click.option(
"--tls-cert",
type=pem_file_option_type,
default=None,
help="certificate file for TLS (in PEM format)",
)
@click.option(
"--tls-key",
type=pem_file_option_type,
default=None,
help="private key file for TLS (in PEM format)",
)
@click.option(
"--worker-port",
default=None,
help="Serving computation port, defaults to random. "
"When creating multiple workers with --nworkers, a sequential range of "
"worker ports may be used by specifying the first and last available "
"ports like <first-port>:<last-port>. For example, --worker-port=3000:3026 "
"will use ports 3000, 3001, ..., 3025, 3026.",
)
@click.option(
"--nanny-port",
default=None,
help="Serving nanny port, defaults to random. "
"When creating multiple nannies with --nworkers, a sequential range of "
"nanny ports may be used by specifying the first and last available "
"ports like <first-port>:<last-port>. For example, --nanny-port=3000:3026 "
"will use ports 3000, 3001, ..., 3025, 3026.",
)
@click.option(
"--bokeh-port", type=int, default=None, help="Deprecated. See --dashboard-address"
)
@click.option(
"--dashboard-address",
type=str,
default=":0",
help="Address on which to listen for diagnostics dashboard",
)
@click.option(
"--dashboard/--no-dashboard",
"dashboard",
default=True,
required=False,
help="Launch the Dashboard [default: --dashboard]",
)
@click.option(
"--bokeh/--no-bokeh",
"bokeh",
default=None,
help="Deprecated. See --dashboard/--no-dashboard.",
required=False,
)
@click.option(
"--listen-address",
type=str,
default=None,
help="The address to which the worker binds. Example: tcp://0.0.0.0:9000 or tcp://:9000 for IPv4+IPv6",
)
@click.option(
"--contact-address",
type=str,
default=None,
help="The address the worker advertises to the scheduler for "
"communication with it and other workers. "
"Example: tcp://127.0.0.1:9000",
)
@click.option(
"--host",
type=str,
default=None,
help="Serving host. Should be an ip address that is"
" visible to the scheduler and other workers. "
"See --listen-address and --contact-address if you "
"need different listen and contact addresses. "
"See --interface.",
)
@click.option(
"--interface", type=str, default=None, help="Network interface like 'eth0' or 'ib0'"
)
@click.option(
"--protocol", type=str, default=None, help="Protocol like tcp, tls, or ucx"
)
@click.option("--nthreads", type=int, default=0, help="Number of threads per process.")
@click.option(
"--nprocs",
type=str,
default=None,
show_default=True,
help="Deprecated. Use '--nworkers' instead. Number of worker processes to "
"launch. If negative, then (CPU_COUNT + 1 + nprocs) is used. "
"Set to 'auto' to set nprocs and nthreads dynamically based on CPU_COUNT",
)
@click.option(
"--nworkers",
"n_workers", # This sets the Python argument name
type=str,
default=None,
show_default=True,
help="Number of worker processes to launch. "
"If negative, then (CPU_COUNT + 1 + nworkers) is used. "
"Set to 'auto' to set nworkers and nthreads dynamically based on CPU_COUNT",
)
@click.option(
"--name",
type=str,
default=None,
help="A unique name for this worker like 'worker-1'. "
"If used with --nworkers then the process number "
"will be appended like name-0, name-1, name-2, ...",
)
@click.option(
"--memory-limit",
default="auto",
show_default=True,
help="""\b
Bytes of memory per process that the worker can use.
This can be:
- an integer (bytes), note 0 is a special case for no memory management.
- a float (fraction of total system memory).
- a string (like 5GB or 5000M).
- 'auto' for automatically computing the memory limit.
""",
)
@click.option(
"--reconnect/--no-reconnect",
default=True,
help="Reconnect to scheduler if disconnected [default: --reconnect]",
)
@click.option(
"--nanny/--no-nanny",
default=True,
help="Start workers in nanny process for management [default: --nanny]",
)
@click.option("--pid-file", type=str, default="", help="File to write the process PID")
@click.option(
"--local-directory", default=None, type=str, help="Directory to place worker files"
)
@click.option(
"--resources",
type=str,
default=None,
help='Resources for task constraints like "GPU=2 MEM=10e9". '
"Resources are applied separately to each worker process "
"(only relevant when starting multiple worker processes with '--nworkers').",
)
@click.option(
"--scheduler-file",
type=str,
default=None,
help="Filename to JSON encoded scheduler information. "
"Use with dask-scheduler --scheduler-file",
)
@click.option(
"--death-timeout",
type=str,
default=None,
help="Seconds to wait for a scheduler before closing",
)
@click.option(
"--dashboard-prefix", type=str, default="", help="Prefix for the dashboard"
)
@click.option(
"--lifetime",
type=str,
default=None,
help="If provided, shut down the worker after this duration.",
)
@click.option(
"--lifetime-stagger",
type=str,
default="0 seconds",
show_default=True,
help="Random amount by which to stagger lifetime values",
)
@click.option(
"--worker-class",
type=str,
default="dask.distributed.Worker",
show_default=True,
help="Worker class used to instantiate workers from.",
)
@click.option(
"--lifetime-restart/--no-lifetime-restart",
"lifetime_restart",
default=False,
show_default=True,
required=False,
help="Whether or not to restart the worker after the lifetime lapses. "
"This assumes that you are using the --lifetime and --nanny keywords",
)
@click.option(
"--preload",
type=str,
multiple=True,
is_eager=True,
help="Module that should be loaded by each worker process "
'like "foo.bar" or "/path/to/foo.py"',
)
@click.argument(
"preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
)
@click.option(
"--preload-nanny",
type=str,
multiple=True,
is_eager=True,
help="Module that should be loaded by each nanny "
'like "foo.bar" or "/path/to/foo.py"',
)
@click.version_option()
def main(
    scheduler,
    host,
    worker_port,
    listen_address,
    contact_address,
    nanny_port,
    nthreads,
    nprocs,
    n_workers,
    nanny,
    name,
    pid_file,
    resources,
    dashboard,
    bokeh,
    bokeh_port,
    scheduler_file,
    dashboard_prefix,
    tls_ca_file,
    tls_cert,
    tls_key,
    dashboard_address,
    worker_class,
    preload_nanny,
    **kwargs,
):
    """CLI entry point for ``dask-worker``.

    Validates option combinations, resolves deprecated aliases, computes
    the worker/thread layout, then starts ``n_workers`` Nanny (or Worker)
    instances on the event loop and blocks until they exit or a signal /
    timeout stops them.
    """
    # Triple the GC thresholds to cut collection overhead under task churn.
    g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653
    gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)
    enable_proctitle_on_current()
    enable_proctitle_on_children()
    # Back-compat shims for the deprecated --bokeh* flags.
    if bokeh_port is not None: # pragma: no cover
        warnings.warn(
            "The --bokeh-port flag has been renamed to --dashboard-address. "
            "Consider adding ``--dashboard-address :%d`` " % bokeh_port
        )
        dashboard_address = bokeh_port
    if bokeh is not None:
        warnings.warn(
            "The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
        )
        dashboard = bokeh
    # TLS security settings: only include the keys actually supplied.
    sec = {
        k: v
        for k, v in [
            ("tls_ca_file", tls_ca_file),
            ("tls_worker_cert", tls_cert),
            ("tls_worker_key", tls_key),
        ]
        if v is not None
    }
    # Resolve the deprecated --nprocs alias into n_workers.
    if nprocs is not None and n_workers is not None:
        logger.error(
            "Both --nprocs and --nworkers were specified. Use --nworkers only."
        )
        sys.exit(1)
    elif nprocs is not None:
        warnings.warn(
            "The --nprocs flag will be removed in a future release. It has been "
            "renamed to --nworkers.",
            FutureWarning,
        )
        n_workers = nprocs
    # 'auto' derives a process/thread split from CPU_COUNT; a negative
    # value means "CPU_COUNT + 1 + n" (i.e. all cores minus N).
    if n_workers == "auto":
        n_workers, nthreads = nprocesses_nthreads()
    elif n_workers is None:
        n_workers = 1
    else:
        n_workers = int(n_workers)
    if n_workers < 0:
        n_workers = CPU_COUNT + 1 + n_workers
    if n_workers <= 0:
        logger.error(
            "Failed to launch worker. Must specify --nworkers so that there's at least one process."
        )
        sys.exit(1)
    # Option-combination validation: multiple workers need a nanny and
    # cannot share a single fixed listen address; --contact-address
    # requires --listen-address; --listen-address conflicts with
    # --worker-port/--host.
    if n_workers > 1 and not nanny:
        logger.error(
            "Failed to launch worker. You cannot use the --no-nanny argument when n_workers > 1."
        )
        sys.exit(1)
    if contact_address and not listen_address:
        logger.error(
            "Failed to launch worker. "
            "Must specify --listen-address when --contact-address is given"
        )
        sys.exit(1)
    if n_workers > 1 and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when n_workers > 1."
        )
        sys.exit(1)
    if (worker_port or host) and listen_address:
        logger.error(
            "Failed to launch worker. "
            "You cannot specify --listen-address when --worker-port or --host is given."
        )
        sys.exit(1)
    try:
        if listen_address:
            (host, worker_port) = get_address_host_port(listen_address, strict=True)
            if ":" in host:
                # IPv6 -- bracket to pass as user args
                host = f"[{host}]"
        if contact_address:
            # we only need this to verify it is getting parsed
            (_, _) = get_address_host_port(contact_address, strict=True)
        else:
            # if contact address is not present we use the listen_address for contact
            contact_address = listen_address
    except ValueError as e: # pragma: no cover
        logger.error("Failed to launch worker. " + str(e))
        sys.exit(1)
    # Nannies listen on nanny_port; bare workers listen on worker_port.
    if nanny:
        port = nanny_port
    else:
        port = worker_port
    if not nthreads:
        nthreads = CPU_COUNT // n_workers
    if pid_file:
        with open(pid_file, "w") as f:
            f.write(str(os.getpid()))
        def del_pid_file():
            if os.path.exists(pid_file):
                os.remove(pid_file)
        atexit.register(del_pid_file)
    # Parse "GPU=2 MEM=10e9"-style resource constraints into a float map.
    if resources:
        resources = resources.replace(",", " ").split()
        resources = dict(pair.split("=") for pair in resources)
        resources = valmap(float, resources)
    else:
        resources = None
    loop = IOLoop.current()
    worker_class = import_term(worker_class)
    if nanny:
        kwargs["worker_class"] = worker_class
        kwargs["preload_nanny"] = preload_nanny
    if nanny:
        kwargs.update({"worker_port": worker_port, "listen_address": listen_address})
        t = Nanny
    else:
        if nanny_port:
            kwargs["service_ports"] = {"nanny": nanny_port}
        t = worker_class
    # A scheduler must be reachable one way or another.
    if (
        not scheduler
        and not scheduler_file
        and dask.config.get("scheduler-address", None) is None
    ):
        raise ValueError(
            "Need to provide scheduler address like\n"
            "dask-worker SCHEDULER_ADDRESS:8786"
        )
    # Numeric names stay ints; anything unparsable keeps the raw string.
    with suppress(TypeError, ValueError):
        name = int(name)
    # One Nanny/Worker per process slot; names get a "-<i>" suffix when
    # several workers share a base name.
    nannies = [
        t(
            scheduler,
            scheduler_file=scheduler_file,
            nthreads=nthreads,
            loop=loop,
            resources=resources,
            security=sec,
            contact_address=contact_address,
            host=host,
            port=port,
            dashboard=dashboard,
            dashboard_address=dashboard_address,
            name=name
            if n_workers == 1 or name is None or name == ""
            else str(name) + "-" + str(i),
            **kwargs,
        )
        for i in range(n_workers)
    ]
    async def close_all():
        # Unregister all workers from scheduler
        if nanny:
            await asyncio.gather(*(n.close(timeout=2) for n in nannies))
    signal_fired = False
    def on_signal(signum):
        # Record that shutdown came from a signal so the TimeoutError
        # handler below does not double-log.
        nonlocal signal_fired
        signal_fired = True
        if signum != signal.SIGINT:
            logger.info("Exiting on signal %d", signum)
        return asyncio.ensure_future(close_all())
    async def run():
        # Start every worker, then wait for all of them to finish.
        await asyncio.gather(*nannies)
        await asyncio.gather(*(n.finished() for n in nannies))
    install_signal_handlers(loop, cleanup=on_signal)
    try:
        loop.run_sync(run)
    except TimeoutError:
        # We already log the exception in nanny / worker. Don't do it again.
        if not signal_fired:
            logger.info("Timed out starting worker")
        sys.exit(1)
    except KeyboardInterrupt: # pragma: no cover
        pass
    finally:
        logger.info("End worker")
def go():
    # Console-script entry point: verify the interpreter version, then
    # hand control to the click-decorated main().
    check_python_3()
    main()
if __name__ == "__main__":
    go() # pragma: no cover
| |
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import errno
import os
import socket
import sys
import six
from ._exceptions import *
from ._logging import *
from ._socket import*
from ._ssl_compat import *
from ._url import *
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
__all__ = ["proxy_info", "connect", "read_headers"]
# PySocks is an optional dependency: fall back to a stub exception class so
# `except ProxyConnectionError` clauses elsewhere still work without it.
try:
    import socks
    ProxyConnectionError = socks.ProxyConnectionError
    HAS_PYSOCKS = True
# BUG FIX: was a bare `except:`, which swallowed even SystemExit /
# KeyboardInterrupt.  ImportError covers a missing PySocks; AttributeError
# covers old PySocks versions without ProxyConnectionError.
except (ImportError, AttributeError):
    class ProxyConnectionError(BaseException):
        pass
    HAS_PYSOCKS = False
class proxy_info(object):
    """Proxy configuration extracted from connect() keyword options.

    Recognised options: proxy_type ('http' default, 'socks4', 'socks5',
    'socks5h'), http_proxy_host, http_proxy_port, http_proxy_auth,
    http_no_proxy.  Without a proxy host, port/auth/no_proxy are zeroed.
    """

    _VALID_TYPES = ('http', 'socks4', 'socks5', 'socks5h')

    def __init__(self, **options):
        self.type = options.get("proxy_type") or "http"
        if self.type not in self._VALID_TYPES:
            raise ValueError("proxy_type must be 'http', 'socks4', 'socks5' or 'socks5h'")
        self.host = options.get("http_proxy_host", None)
        if not self.host:
            # No proxy host configured: neutral defaults.
            self.port = 0
            self.auth = None
            self.no_proxy = None
        else:
            self.port = options.get("http_proxy_port", 0)
            self.auth = options.get("http_proxy_auth", None)
            self.no_proxy = options.get("http_no_proxy", None)
def _open_proxied_socket(url, options, proxy):
    """Open a connection to *url* through a SOCKS/HTTP proxy via PySocks.

    Returns (socket, (hostname, port, resource)).  Wraps the socket with
    SSL when the URL scheme is secure.  Raises WebSocketException when
    PySocks is missing or SSL is required but unavailable.
    """
    hostname, port, resource, is_secure = parse_url(url)
    if not HAS_PYSOCKS:
        raise WebSocketException("PySocks module not found.")
    # Map proxy.type onto the PySocks proxy constants; the trailing 'h'
    # of 'socks5h' requests remote (proxy-side) DNS resolution.
    ptype = socks.SOCKS5
    rdns = False
    if proxy.type == "socks4":
        ptype = socks.SOCKS4
    if proxy.type == "http":
        ptype = socks.HTTP
    if proxy.type[-1] == "h":
        rdns = True
    sock = socks.create_connection(
        (hostname, port),
        proxy_type = ptype,
        proxy_addr = proxy.host,
        proxy_port = proxy.port,
        proxy_rdns = rdns,
        proxy_username = proxy.auth[0] if proxy.auth else None,
        proxy_password = proxy.auth[1] if proxy.auth else None,
        timeout = options.timeout,
        socket_options = DEFAULT_SOCKET_OPTION + options.sockopt
    )
    if is_secure:
        if HAVE_SSL:
            sock = _ssl_socket(sock, options.sslopt, hostname)
        else:
            raise WebSocketException("SSL not available.")
    return sock, (hostname, port, resource)
def connect(url, options, proxy, socket):
    """Establish (or reuse) a socket connection for the websocket *url*.

    If *socket* is given it is reused as-is.  A non-HTTP proxy goes
    through PySocks; an HTTP proxy goes through the CONNECT tunnel path.
    Returns (socket, (hostname, port, resource)).
    NOTE: the *socket* parameter shadows the stdlib socket module here.
    """
    if proxy.host and not socket and not (proxy.type == 'http'):
        return _open_proxied_socket(url, options, proxy)
    hostname, port, resource, is_secure = parse_url(url)
    if socket:
        return socket, (hostname, port, resource)
    addrinfo_list, need_tunnel, auth = _get_addrinfo_list(
        hostname, port, is_secure, proxy)
    if not addrinfo_list:
        raise WebSocketException(
            "Host not found.: " + hostname + ":" + str(port))
    sock = None
    try:
        sock = _open_socket(addrinfo_list, options.sockopt, options.timeout)
        if need_tunnel:
            sock = _tunnel(sock, hostname, port, auth)
        if is_secure:
            if HAVE_SSL:
                sock = _ssl_socket(sock, options.sslopt, hostname)
            else:
                raise WebSocketException("SSL not available.")
        return sock, (hostname, port, resource)
    except:
        # Close the partially-opened socket before re-raising.
        if sock:
            sock.close()
        raise
def _get_addrinfo_list(hostname, port, is_secure, proxy):
    """Resolve the address to connect to, honouring HTTP proxy settings.

    Returns (addrinfo_list, need_tunnel, auth).  When a proxy applies, the
    proxy host is resolved instead and need_tunnel is True so the caller
    issues a CONNECT.  Raises WebSocketAddressException on DNS failure.
    """
    phost, pport, pauth = get_proxy_info(
        hostname, is_secure, proxy.host, proxy.port, proxy.auth, proxy.no_proxy)
    try:
        if not phost:
            addrinfo_list = socket.getaddrinfo(
                hostname, port, 0, 0, socket.SOL_TCP)
            return addrinfo_list, False, None
        else:
            # Default the proxy port to 80 when unset.
            pport = pport and pport or 80
            # when running on windows 10, the getaddrinfo used above
            # returns a socktype 0. This generates an error exception:
            #_on_error: exception Socket type must be stream or datagram, not 0
            # Force the socket type to SOCK_STREAM
            addrinfo_list = socket.getaddrinfo(phost, pport, 0, socket.SOCK_STREAM, socket.SOL_TCP)
            return addrinfo_list, True, pauth
    except socket.gaierror as e:
        raise WebSocketAddressException(e)
def _open_socket(addrinfo_list, sockopt, timeout):
    """Try each addrinfo in turn and return the first connected socket.

    Connection-refused (and proxy-connection) errors are remembered and
    re-raised only after every address has failed; any other socket error
    aborts immediately.  EINTR retries the same address.
    """
    err = None
    for addrinfo in addrinfo_list:
        family, socktype, proto = addrinfo[:3]
        sock = socket.socket(family, socktype, proto)
        sock.settimeout(timeout)
        for opts in DEFAULT_SOCKET_OPTION:
            sock.setsockopt(*opts)
        for opts in sockopt:
            sock.setsockopt(*opts)
        address = addrinfo[4]
        err = None
        while not err:
            try:
                sock.connect(address)
            except ProxyConnectionError as error:
                # Remember the failure and fall through to the next address.
                err = WebSocketProxyException(str(error))
                err.remote_ip = str(address[0])
                continue
            except socket.error as error:
                error.remote_ip = str(address[0])
                try:
                    # errno.WSAECONNREFUSED only exists on Windows.
                    eConnRefused = (errno.ECONNREFUSED, errno.WSAECONNREFUSED)
                except:
                    eConnRefused = (errno.ECONNREFUSED, )
                if error.errno == errno.EINTR:
                    # Interrupted system call: retry this address.
                    continue
                elif error.errno in eConnRefused:
                    err = error
                    continue
                else:
                    raise error
            else:
                # Connected successfully: leave the retry loop.
                break
        else:
            # while-else: loop exited because err was set -> next address.
            continue
        break
    else:
        # for-else: no address connected; re-raise the last refusal if any.
        if err:
            raise err
    return sock
def _can_use_sni():
    """Return True when the interpreter supports SNI (2.7.9+ or 3.2+)."""
    if six.PY2:
        return sys.version_info >= (2, 7, 9)
    return sys.version_info >= (3, 2)
def _wrap_sni_socket(sock, sslopt, hostname, check_hostname):
    """Wrap *sock* in an SSLContext so SNI (server_hostname) is sent.

    Applies cert_reqs, CA locations, client cert chain, ciphers and
    ecdh_curve from the *sslopt* dict.
    """
    context = ssl.SSLContext(sslopt.get('ssl_version', ssl.PROTOCOL_SSLv23))
    if sslopt.get('cert_reqs', ssl.CERT_NONE) != ssl.CERT_NONE:
        cafile = sslopt.get('ca_certs', None)
        capath = sslopt.get('ca_cert_path', None)
        if cafile or capath:
            context.load_verify_locations(cafile=cafile, capath=capath)
        elif hasattr(context, 'load_default_certs'):
            # No explicit CA supplied: fall back to the system trust store.
            context.load_default_certs(ssl.Purpose.SERVER_AUTH)
    if sslopt.get('certfile', None):
        context.load_cert_chain(
            sslopt['certfile'],
            sslopt.get('keyfile', None),
            sslopt.get('password', None),
        )
    # see
    # https://github.com/liris/websocket-client/commit/b96a2e8fa765753e82eea531adb19716b52ca3ca#commitcomment-10803153
    context.verify_mode = sslopt['cert_reqs']
    if HAVE_CONTEXT_CHECK_HOSTNAME:
        context.check_hostname = check_hostname
    if 'ciphers' in sslopt:
        context.set_ciphers(sslopt['ciphers'])
    if 'cert_chain' in sslopt:
        certfile, keyfile, password = sslopt['cert_chain']
        context.load_cert_chain(certfile, keyfile, password)
    if 'ecdh_curve' in sslopt:
        context.set_ecdh_curve(sslopt['ecdh_curve'])
    return context.wrap_socket(
        sock,
        do_handshake_on_connect=sslopt.get('do_handshake_on_connect', True),
        suppress_ragged_eofs=sslopt.get('suppress_ragged_eofs', True),
        server_hostname=hostname,
    )
def _ssl_socket(sock, user_sslopt, hostname):
    """Wrap *sock* with SSL, defaulting to certificate verification.

    The WEBSOCKET_CLIENT_CA_BUNDLE environment variable may point at a CA
    bundle file or directory and is used when the caller supplied none.
    """
    sslopt = dict(cert_reqs=ssl.CERT_REQUIRED)
    sslopt.update(user_sslopt)
    certPath = os.environ.get('WEBSOCKET_CLIENT_CA_BUNDLE')
    if certPath and os.path.isfile(certPath) \
            and user_sslopt.get('ca_certs', None) is None \
            and user_sslopt.get('ca_cert', None) is None:
        sslopt['ca_certs'] = certPath
    elif certPath and os.path.isdir(certPath) \
            and user_sslopt.get('ca_cert_path', None) is None:
        sslopt['ca_cert_path'] = certPath
    # Hostname checking only makes sense when certificates are verified.
    check_hostname = sslopt["cert_reqs"] != ssl.CERT_NONE and sslopt.pop(
        'check_hostname', True)
    if _can_use_sni():
        sock = _wrap_sni_socket(sock, sslopt, hostname, check_hostname)
    else:
        # ssl.wrap_socket() does not accept check_hostname; drop it.
        sslopt.pop('check_hostname', True)
        sock = ssl.wrap_socket(sock, **sslopt)
    if not HAVE_CONTEXT_CHECK_HOSTNAME and check_hostname:
        match_hostname(sock.getpeercert(), hostname)
    return sock
def _tunnel(sock, host, port, auth):
    """Establish an HTTP CONNECT tunnel through a proxy; return the socket.

    Raises WebSocketProxyException when the proxy refuses the CONNECT or
    its response cannot be parsed.
    """
    debug("Connecting proxy...")
    header_lines = ["CONNECT %s:%d HTTP/1.0" % (host, port)]
    # TODO: support digest auth.
    if auth and auth[0]:
        if auth[1]:
            credentials = auth[0] + ":" + auth[1]
        else:
            credentials = auth[0]
        token = base64encode(credentials.encode()).strip().decode()
        header_lines.append("Proxy-Authorization: Basic %s" % token)
    connect_header = "\r\n".join(header_lines) + "\r\n\r\n"
    dump("request header", connect_header)
    send(sock, connect_header)
    try:
        status, resp_headers, status_message = read_headers(sock)
    except Exception as e:
        raise WebSocketProxyException(str(e))
    if status != 200:
        raise WebSocketProxyException(
            "failed CONNECT via proxy status: %r" % status)
    return sock
def read_headers(sock):
    """Read an HTTP response head from *sock*.

    Returns (status code, dict of headers with lowercased keys,
    status message or None).  Raises WebSocketException on a malformed
    header line.
    """
    status = None
    status_message = None
    headers = {}
    trace("--- response header ---")
    while True:
        decoded = recv_line(sock).decode('utf-8').strip()
        if not decoded:
            # A blank line terminates the header section.
            break
        trace(decoded)
        if not status:
            # First line is the status line, e.g. "HTTP/1.1 200 OK".
            status_info = decoded.split(" ", 2)
            status = int(status_info[1])
            if len(status_info) > 2:
                status_message = status_info[2]
        else:
            key, sep, value = decoded.partition(":")
            if not sep:
                raise WebSocketException("Invalid header")
            headers[key.lower()] = value.strip()
    trace("-----------------------")
    return status, headers, status_message
| |
from django import template
from fractions import Fraction
from hyphen import hyphenator, dictools, config
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django.conf import settings
import numpy as numpy
import datetime
from kcal.models import Event
import re
import os
register = template.Library()
@register.filter
def sum_calories(set_of_foods):
    """Sum the calories of a given meal.

    Each food contributes ``int(kcal * quantity)``, where ``quantity``
    is parsed as a fraction string (e.g. ``"1/2"``).
    """
    # Generator expression with the builtin sum() -- the old accumulator
    # shadowed the builtin name ``sum``.
    return sum(
        int(float(food.of_energy.kcal) * float(Fraction(food.quantity)))
        for food in set_of_foods
    )
@register.filter
def sum_daily_calories(day_list):
    """Sum calories of a given day.

    Returns ``[total, real_total]`` where ``real_total`` counts only
    foods whose calorie value is positive.
    """
    total = 0
    real_total = 0
    for event in day_list:
        for food in event.consumption_set.all():
            # Compute the contribution once instead of twice.
            calories = int(float(food.of_energy.kcal)
                           * float(Fraction(food.quantity)))
            total += calories
            # Compare numerically: kcal is run through float() above, so it
            # may be stored as a string, and a raw string comparison against
            # 0 is not meaningful.
            if float(food.of_energy.kcal) > 0:
                real_total += calories
    return [total, real_total]
@register.filter
def commas(value, arg):
    """Format *value* with *arg* (a %-format string) and add thousands
    separators to the first run of digits (i.e. the integer part).

    A leading minus sign is wrapped in styled HTML spans.
    """
    # Removed the dead ``__test__ = {}`` and the redundant local
    # ``import re`` (re is imported at module level); ``xrange`` replaced
    # by enumerate() so the code runs on both Python 2 and 3.
    re_digits_nondigits = re.compile(r'\d+|\D+')
    parts = re_digits_nondigits.findall(arg % (value,))
    for i, s in enumerate(parts):
        if s.isdigit():
            grouped = []
            for j, c in enumerate(reversed(s)):
                if j and (not (j % 3)):
                    grouped.insert(0, ',')
                grouped.insert(0, c)
            parts[i] = ''.join(grouped)
            # Only the first digit run (the integer part) gets separators;
            # digits after the decimal point must stay untouched.
            break
    num = ''.join(parts)
    num = re.sub(r'\-(.*)',
                 r'<span class="neg"><span class="minus">−</span>\1</span>',
                 num)
    return num
@register.filter
def smartypants(value):
    """Run the optional ``smartypants`` typography library over *value*.

    Returns *value* unchanged when the library is missing or fails.
    """
    try:
        import smartypants
        return smartypants.smartyPants(value)
    except Exception:
        # Was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt.
        return value
@register.filter
def strip_paras(value):
    """
    Removes <p> tags from strings.
    >>> strip_paras("<p>test</p>")
    'test'
    """
    unwrapped = re.sub(r'<p>(.*)</p>', r'\1', value)
    return unwrapped
@register.filter
def fix_type_pre(value):
    """
    So far this only modifies ellipses.

    >>> fix_type_pre("... ....")
    ' <nobr> . . . </nobr>  <nobr> . . . . </nobr> '
    """
    # Replace four-dot ellipses FIRST, so the three-dot pattern cannot eat
    # the leading dots of a four-dot run.  Lookarounds (instead of the old
    # ``[^\.]`` class) keep the three-dot pattern from consuming -- and
    # silently deleting -- the character that follows the ellipsis, and
    # let a trailing "..." at end-of-string match as well.
    new_value = re.sub(r'\.{4}', r' <nobr> . . . . </nobr> ', value)
    new_value = re.sub(r'(?<!\.)\.{3}(?!\.)', r' <nobr> . . . </nobr> ',
                       new_value)
    return new_value
@register.filter
def fix_type_post(value):
    """Typographic post-processing: "etc." becomes "&c." and unit
    abbreviations are italicised."""
    new_value = value
    # Capture the character before "etc." -- the old pattern matched it
    # with ``\W`` and therefore deleted it from the output.
    new_value = re.sub(r'(\W)etc\.', r'\1<i>&</i>c. ', new_value)
    new_value = re.sub(
        r'([\d\s])(oz|fl\. oz|c|pt|qt|gal|in|ft|yd|m|hr|min|tsp|tbsp)\.',
        r'\1<i>\2</i>.', new_value)
    return new_value
# uni_fractions = {'1/2':'189',
# '1/3':'8531',
# '2/3':'8532',
# '1/4':'188',
# '3/4':'190',
# '1/5':'8533',
# '2/5':'8534',
# '3/5':'8535',
# '4/5':'8536',
# '1/6':'8537',
# '5/6':'8538',
# '1/8':'8539',
# '3/8':'8540',
# '5/8':'8541',
# '7/8':'8541',}
@register.filter
def fractionalize(value):
    """
    Return some HTML when given a fraction.
    """
    frac = Fraction(value)
    # Whole numbers (denominator 1 after reduction) pass through unchanged.
    if frac.denominator == frac.numerator or frac.denominator == 1:
        return value
    return ('<div class="fraction"><span class="numerator">%s'
            '</span>/<span class="denominator">%s</span></div>'
            % (frac.numerator, frac.denominator))
def day_link(date):
    """Return an HTML anchor linking to the day view for *date*."""
    href = date.strftime("%Y/%m/%d")
    label = date.strftime("%b %e")
    return '<a href="/ohlih/%s">%s</a>' % (href, label)
@register.filter
def previous_day(value):
    """
    Give us a link to the previous day
    """
    # ``.date`` is a method of the datetime: the original referenced it
    # without calling it, handing the bound-method object to the queryset
    # filter instead of a date.
    date = value['list'][0].time.date()
    events = Event.objects.filter(time__lt=date).order_by('-time')[:1]
    if len(events) > 0:
        return '<' + day_link(events[0].time)
    else:
        return ''
@register.filter
def next_day(value):
    """
    Give us a link to the next day
    """
    first_event_time = value['list'][0].time
    later_events = Event.objects.filter(time__gt=first_event_time).order_by('time')
    if len(later_events) > 0:
        return day_link(later_events[0].time) + '>'
    return ''
@register.filter
def remaining(value):
    # Calories still available, given the site-wide daily allowance.
    return settings.TOTAL_ALLOWED_CALORIES - value
@register.filter
def hyphenate(value, arg=None, autoescape=None):
    """Insert soft hyphens into long words so browsers can break them.

    NOTE(review): ``arg`` and ``autoescape`` are accepted but unused --
    presumably kept for Django's filter-signature conventions; confirm
    before removing.
    """
    lang='en_US'
    minlen = 5
    h = hyphenator(lang,directory=settings.DEFAULT_DIC_PATH)
    new = []
    for word in value.split(u' '):
        # Only hyphenate plain alphabetic words longer than ``minlen``.
        if len(word) > minlen and word.isalpha():
            # The join string is the soft-hyphen character (U+00AD).
            new.append(u'­'.join(h.syllables(word)))
        else:
            new.append(word)
    result = u' '.join(new)
    return result
hyphenate.needs_autoescape = True
def calorie_list(list_of_events):
    """
    Get a simple list of calories out of a list of events
    """
    totals = []
    for event in list_of_events:
        # Total for one event: int(kcal * quantity) summed over its foods.
        event_total = sum(
            int(float(food.of_energy.kcal) * float(Fraction(food.quantity)))
            for food in event.consumption_set.all()
        )
        totals.append(event_total)
    return totals
def calorie_list_days(list_of_days):
    """
    Get a simple list of calories out of a list of days (thus summing
    the calories in each event)
    """
    the_list = []
    for day in list_of_days:
        all_cals = calorie_list(day['list'])
        # List comprehension instead of filter(): on Python 3 filter()
        # returns a lazy iterator, which numpy.sum cannot reduce
        # element-wise.
        cals_filtered = [cal for cal in all_cals if cal > 0]
        the_list.append(numpy.sum(cals_filtered))
    return the_list
def extract_info(cal_list):
    """
    Summarize a list of calorie counts.

    Returns a one-element list holding a dict with the raw list plus its
    sum, median, mean (truncated to int), min and max.  An empty
    ``cal_list`` raises (numpy.min/max of an empty sequence).
    """
    # Renamed the local: it used to shadow the builtin ``dict``.
    info = {}
    info['cal_list'] = cal_list
    info['sum'] = numpy.sum(cal_list)
    info['median'] = numpy.median(cal_list)
    info['mean'] = int(numpy.mean(cal_list))
    info['min'] = numpy.min(cal_list)
    info['max'] = numpy.max(cal_list)
    return [info]
@register.filter
def process_events(list_of_events):
    # Summary statistics (sum/median/mean/min/max) for a list of events.
    return extract_info(calorie_list(list_of_events))
@register.filter
def process_days(list_of_days):
    """Summary statistics for a list of days."""
    # Renamed the local variable: it used to shadow the module-level
    # ``calorie_list`` function.
    daily_calories = calorie_list_days(list_of_days)
    days = extract_info(daily_calories)
    return days
@register.filter
def last_month(a_month):
    "Subtracts a duration of 1 month"
    # "One month" is approximated as four weeks (28 days).
    four_weeks = datetime.timedelta(weeks=4)
    return a_month - four_weeks
@register.filter
def weight_lost(list_of_days):
    """Difference between the last and the first recorded weight.

    Returns 0 when the given days contain no weight records (the
    original crashed with an IndexError in that case).
    """
    weights = []
    for day in list_of_days:
        for event in day['list']:
            ws = event.weight_set.all()
            if len(ws):
                weights.append(ws[0].weight)
    if not weights:
        return 0
    return weights[-1] - weights[0]
@register.filter
def to_date(date):
    """Parse a 'YYYY-MM-DD...' string into a datetime.date."""
    year, month, day = int(date[0:4]), int(date[5:7]), int(date[8:10])
    return datetime.date(year, month, day)
if __name__ == "__main__":
    # Run the doctests embedded in the filter docstrings above.
    import doctest
    doctest.testmod()
| |
"""Support for Melissa Climate A/C."""
from __future__ import annotations
import logging
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
FAN_AUTO,
FAN_HIGH,
FAN_LOW,
FAN_MEDIUM,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DATA_MELISSA
_LOGGER = logging.getLogger(__name__)
# This platform supports setting a target temperature and a fan mode.
SUPPORT_FLAGS = SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE
# HVAC modes exposed to Home Assistant (HVAC_MODE_AUTO is not offered).
OP_MODES = [
    HVAC_MODE_HEAT,
    HVAC_MODE_COOL,
    HVAC_MODE_DRY,
    HVAC_MODE_FAN_ONLY,
    HVAC_MODE_OFF,
]
# Fan modes exposed to Home Assistant.
FAN_MODES = [FAN_AUTO, FAN_HIGH, FAN_MEDIUM, FAN_LOW]
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Iterate through and add all Melissa devices."""
    api = hass.data[DATA_MELISSA]
    devices = (await api.async_fetch_devices()).values()
    all_devices = []
    for device in devices:
        # Only devices reported as type "melissa" are climate units.
        if device["type"] == "melissa":
            all_devices.append(MelissaClimate(api, device["serial_number"], device))
    async_add_entities(all_devices)
class MelissaClimate(ClimateEntity):
    """Representation of a Melissa Climate device."""

    def __init__(self, api, serial_number, init_data):
        """Initialize the climate device."""
        self._name = init_data["name"]
        self._api = api
        self._serial_number = serial_number
        # Latest controller log (temperature/humidity readings).
        self._data = init_data["controller_log"]
        self._state = None
        # Last known command settings; None until the first update.
        self._cur_settings = None

    @property
    def name(self):
        """Return the name of the thermostat, if any."""
        return self._name

    @property
    def fan_mode(self):
        """Return the current fan mode."""
        if self._cur_settings is not None:
            return self.melissa_fan_to_hass(self._cur_settings[self._api.FAN])

    @property
    def current_temperature(self):
        """Return the current temperature."""
        if self._data:
            return self._data[self._api.TEMP]

    @property
    def current_humidity(self):
        """Return the current humidity value."""
        if self._data:
            return self._data[self._api.HUMIDITY]

    @property
    def target_temperature_step(self):
        """Return the supported step of target temperature."""
        return PRECISION_WHOLE

    @property
    def hvac_mode(self):
        """Return the current operation mode."""
        if self._cur_settings is None:
            return None
        # The unit reports ON or IDLE while running; anything else is off.
        is_on = self._cur_settings[self._api.STATE] in (
            self._api.STATE_ON,
            self._api.STATE_IDLE,
        )
        if not is_on:
            return HVAC_MODE_OFF
        return self.melissa_op_to_hass(self._cur_settings[self._api.MODE])

    @property
    def hvac_modes(self):
        """Return the list of available operation modes."""
        return OP_MODES

    @property
    def fan_modes(self):
        """List of available fan modes."""
        return FAN_MODES

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        if self._cur_settings is None:
            return None
        return self._cur_settings[self._api.TEMP]

    @property
    def temperature_unit(self):
        """Return the unit of measurement which this thermostat uses."""
        return TEMP_CELSIUS

    @property
    def min_temp(self):
        """Return the minimum supported temperature for the thermostat."""
        return 16

    @property
    def max_temp(self):
        """Return the maximum supported temperature for the thermostat."""
        return 30

    @property
    def supported_features(self):
        """Return the list of supported features."""
        return SUPPORT_FLAGS

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        temp = kwargs.get(ATTR_TEMPERATURE)
        await self.async_send({self._api.TEMP: temp})

    async def async_set_fan_mode(self, fan_mode):
        """Set fan mode."""
        melissa_fan_mode = self.hass_fan_to_melissa(fan_mode)
        await self.async_send({self._api.FAN: melissa_fan_mode})

    async def async_set_hvac_mode(self, hvac_mode):
        """Set operation mode."""
        if hvac_mode == HVAC_MODE_OFF:
            await self.async_send({self._api.STATE: self._api.STATE_OFF})
            return
        mode = self.hass_mode_to_melissa(hvac_mode)
        await self.async_send(
            {self._api.MODE: mode, self._api.STATE: self._api.STATE_ON}
        )

    async def async_send(self, value):
        """Send a settings update to the service, rolling back on failure."""
        try:
            old_value = self._cur_settings.copy()
            self._cur_settings.update(value)
        except AttributeError:
            # No settings fetched yet (self._cur_settings is None).
            old_value = None
        if not await self._api.async_send(
            self._serial_number, "melissa", self._cur_settings
        ):
            # The API rejected the update; restore the previous settings.
            self._cur_settings = old_value

    async def async_update(self):
        """Get latest data from Melissa."""
        try:
            self._data = (await self._api.async_status(cached=True))[
                self._serial_number
            ]
            self._cur_settings = (
                await self._api.async_cur_settings(self._serial_number)
            )["controller"]["_relation"]["command_log"]
        except KeyError:
            _LOGGER.warning("Unable to update entity %s", self.entity_id)

    def melissa_op_to_hass(self, mode):
        """Translate Melissa modes to hass states."""
        if mode == self._api.MODE_HEAT:
            return HVAC_MODE_HEAT
        if mode == self._api.MODE_COOL:
            return HVAC_MODE_COOL
        if mode == self._api.MODE_DRY:
            return HVAC_MODE_DRY
        if mode == self._api.MODE_FAN:
            return HVAC_MODE_FAN_ONLY
        _LOGGER.warning("Operation mode %s could not be mapped to hass", mode)
        return None

    def melissa_fan_to_hass(self, fan):
        """Translate Melissa fan modes to hass modes."""
        if fan == self._api.FAN_AUTO:
            # Fix: return the fan-mode constant FAN_AUTO rather than
            # HVAC_MODE_AUTO; both are the string "auto", so the old code
            # worked only by coincidence.
            return FAN_AUTO
        if fan == self._api.FAN_LOW:
            return FAN_LOW
        if fan == self._api.FAN_MEDIUM:
            return FAN_MEDIUM
        if fan == self._api.FAN_HIGH:
            return FAN_HIGH
        _LOGGER.warning("Fan mode %s could not be mapped to hass", fan)
        return None

    def hass_mode_to_melissa(self, mode):
        """Translate hass states to melissa modes."""
        if mode == HVAC_MODE_HEAT:
            return self._api.MODE_HEAT
        if mode == HVAC_MODE_COOL:
            return self._api.MODE_COOL
        if mode == HVAC_MODE_DRY:
            return self._api.MODE_DRY
        if mode == HVAC_MODE_FAN_ONLY:
            return self._api.MODE_FAN
        # Falls through to an implicit None for unsupported modes.
        _LOGGER.warning("Melissa have no setting for %s mode", mode)

    def hass_fan_to_melissa(self, fan):
        """Translate hass fan modes to melissa modes."""
        # Fix: compare against FAN_AUTO (the fan-mode constant) rather than
        # HVAC_MODE_AUTO; they share the value "auto" by coincidence only.
        if fan == FAN_AUTO:
            return self._api.FAN_AUTO
        if fan == FAN_LOW:
            return self._api.FAN_LOW
        if fan == FAN_MEDIUM:
            return self._api.FAN_MEDIUM
        if fan == FAN_HIGH:
            return self._api.FAN_HIGH
        # Falls through to an implicit None for unsupported modes.
        _LOGGER.warning("Melissa have no setting for %s fan mode", fan)
| |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import convertible.lite_types as lite_types
import convertible.ttypes as ttypes
import convertible.types as types
from thrift.py3lite.converter import to_py3lite_struct
class Py3toPy3liteConverterTest(unittest.TestCase):
    """Conversion from py3 structs (convertible.types) to py3lite structs."""

    def test_simple(self) -> None:
        """Scalar, container, enum and renamed fields survive conversion."""
        simple = to_py3lite_struct(
            lite_types.Simple,
            types.Simple(
                intField=42,
                strField="simple",
                intList=[1, 2, 3],
                strSet={"hello", "world"},
                strToIntMap={"one": 1, "two": 2},
                color=types.Color.GREEN,
                name_="myname",
            ),
        )
        self.assertEqual(simple.intField, 42)
        self.assertEqual(simple.strField, "simple")
        self.assertEqual(simple.intList, [1, 2, 3])
        self.assertEqual(simple.strSet, {"hello", "world"})
        self.assertEqual(simple.strToIntMap, {"one": 1, "two": 2})
        self.assertEqual(simple.color, lite_types.Color.GREEN)
        self.assertEqual(simple.name_, "myname")

    def test_nested(self) -> None:
        """Structs nested in fields, lists and map values convert recursively."""
        nested = to_py3lite_struct(
            lite_types.Nested,
            types.Nested(
                simpleField=types.Simple(
                    intField=42,
                    strField="simple",
                    intList=[1, 2, 3],
                    strSet={"hello", "world"},
                    strToIntMap={"one": 1, "two": 2},
                    color=types.Color.NONE,
                    name_="myname",
                ),
                simpleList=[
                    types.Simple(
                        intField=200,
                        strField="face",
                        intList=[4, 5, 6],
                        strSet={"keep", "calm"},
                        strToIntMap={"three": 3, "four": 4},
                        color=types.Color.RED,
                        name_="myname",
                    ),
                    types.Simple(
                        intField=404,
                        strField="b00k",
                        intList=[7, 8, 9],
                        strSet={"carry", "on"},
                        strToIntMap={"five": 5, "six": 6},
                        color=types.Color.GREEN,
                        name_="myname",
                    ),
                ],
                colorToSimpleMap={
                    types.Color.BLUE: types.Simple(
                        intField=500,
                        strField="internal",
                        intList=[10],
                        strSet={"server", "error"},
                        strToIntMap={"seven": 7, "eight": 8, "nine": 9},
                        color=types.Color.BLUE,
                        name_="myname",
                    )
                },
            ),
        )
        self.assertEqual(nested.simpleField.intField, 42)
        self.assertEqual(nested.simpleList[0].intList, [4, 5, 6])
        self.assertEqual(nested.simpleList[1].strSet, {"carry", "on"})
        self.assertEqual(
            nested.colorToSimpleMap[lite_types.Color.BLUE].color,
            lite_types.Color.BLUE,
        )

    def test_simple_union(self) -> None:
        """A union with a scalar member converts and keeps its active type."""
        simple_union = to_py3lite_struct(lite_types.Union, types.Union(intField=42))
        self.assertEqual(simple_union.type, lite_types.Union.Type.intField)
        self.assertEqual(simple_union.value, 42)

    def test_union_with_py3_name_annotation(self) -> None:
        """A union member renamed via the py3 name annotation converts."""
        simple_union = to_py3lite_struct(lite_types.Union, types.Union(name_="myname"))
        self.assertEqual(simple_union.type, lite_types.Union.Type.name_)
        self.assertEqual(simple_union.value, "myname")

    def test_union_with_containers(self) -> None:
        """A union with a container member converts."""
        union_with_list = to_py3lite_struct(
            lite_types.Union, types.Union(intList=[1, 2, 3])
        )
        self.assertEqual(union_with_list.type, lite_types.Union.Type.intList)
        self.assertEqual(union_with_list.value, [1, 2, 3])

    def test_complex_union(self) -> None:
        """A union holding a struct member converts recursively."""
        complex_union = to_py3lite_struct(
            lite_types.Union,
            types.Union(
                simple_=types.Simple(
                    intField=42,
                    strField="simple",
                    intList=[1, 2, 3],
                    strSet={"hello", "world"},
                    strToIntMap={"one": 1, "two": 2},
                    color=types.Color.NONE,
                )
            ),
        )
        self.assertEqual(complex_union.type, lite_types.Union.Type.simple_)
        self.assertEqual(complex_union.simple_.intField, 42)
class PytoPy3liteConverterTest(unittest.TestCase):
    """Conversion from py structs (convertible.ttypes) to py3lite structs."""

    def test_simple(self) -> None:
        """Scalar, container and enum fields survive conversion.

        The py struct uses the original field name ``name`` while the
        converted py3lite struct exposes the py3-renamed ``name_``.
        """
        simple = to_py3lite_struct(
            lite_types.Simple,
            ttypes.Simple(
                intField=42,
                strField="simple",
                intList=[1, 2, 3],
                strSet={"hello", "world"},
                strToIntMap={"one": 1, "two": 2},
                color=ttypes.Color.GREEN,
                name="myname",
            ),
        )
        self.assertEqual(simple.intField, 42)
        self.assertEqual(simple.strField, "simple")
        self.assertEqual(simple.intList, [1, 2, 3])
        self.assertEqual(simple.strSet, {"hello", "world"})
        self.assertEqual(simple.strToIntMap, {"one": 1, "two": 2})
        self.assertEqual(simple.color, lite_types.Color.GREEN)
        self.assertEqual(simple.name_, "myname")

    def test_nested(self) -> None:
        """Structs nested in fields, lists and map values convert recursively."""
        nested = to_py3lite_struct(
            lite_types.Nested,
            ttypes.Nested(
                simpleField=ttypes.Simple(
                    intField=42,
                    strField="simple",
                    intList=[1, 2, 3],
                    strSet={"hello", "world"},
                    strToIntMap={"one": 1, "two": 2},
                    color=ttypes.Color.NONE,
                    name="myname",
                ),
                simpleList=[
                    ttypes.Simple(
                        intField=200,
                        strField="face",
                        intList=[4, 5, 6],
                        strSet={"keep", "calm"},
                        strToIntMap={"three": 3, "four": 4},
                        color=ttypes.Color.RED,
                        name="myname",
                    ),
                    ttypes.Simple(
                        intField=404,
                        strField="b00k",
                        intList=[7, 8, 9],
                        strSet={"carry", "on"},
                        strToIntMap={"five": 5, "six": 6},
                        color=ttypes.Color.GREEN,
                        name="myname",
                    ),
                ],
                colorToSimpleMap={
                    ttypes.Color.BLUE: ttypes.Simple(
                        intField=500,
                        strField="internal",
                        intList=[10],
                        strSet={"server", "error"},
                        strToIntMap={"seven": 7, "eight": 8, "nine": 9},
                        color=ttypes.Color.BLUE,
                        name="myname",
                    )
                },
            ),
        )
        self.assertEqual(nested.simpleField.intField, 42)
        self.assertEqual(nested.simpleList[0].intList, [4, 5, 6])
        self.assertEqual(nested.simpleList[1].strSet, {"carry", "on"})
        self.assertEqual(
            nested.colorToSimpleMap[lite_types.Color.BLUE].color,
            lite_types.Color.BLUE,
        )

    def test_simple_union(self) -> None:
        """A union with a scalar member converts and keeps its active type."""
        simple_union = to_py3lite_struct(lite_types.Union, ttypes.Union(intField=42))
        self.assertEqual(simple_union.type, lite_types.Union.Type.intField)
        self.assertEqual(simple_union.value, 42)

    def test_union_with_py3_name_annotation(self) -> None:
        """The py field ``name`` maps to the py3lite member ``name_``."""
        simple_union = to_py3lite_struct(lite_types.Union, ttypes.Union(name="myname"))
        self.assertEqual(simple_union.type, lite_types.Union.Type.name_)
        self.assertEqual(simple_union.value, "myname")

    def test_union_with_containers(self) -> None:
        """A union with a container member converts."""
        union_with_list = to_py3lite_struct(
            lite_types.Union, ttypes.Union(intList=[1, 2, 3])
        )
        self.assertEqual(union_with_list.type, lite_types.Union.Type.intList)
        self.assertEqual(union_with_list.value, [1, 2, 3])

    def test_complex_union(self) -> None:
        """A union holding a struct member converts recursively.

        NOTE(review): the py3 variant of this test sets ``simple_=``;
        here the py struct keeps the original ``simpleField`` name --
        presumably the py3 rename only applies on the py3 side; confirm.
        """
        complex_union = to_py3lite_struct(
            lite_types.Union,
            ttypes.Union(
                simpleField=ttypes.Simple(
                    intField=42,
                    strField="simple",
                    intList=[1, 2, 3],
                    strSet={"hello", "world"},
                    strToIntMap={"one": 1, "two": 2},
                    color=ttypes.Color.NONE,
                )
            ),
        )
        self.assertEqual(complex_union.type, lite_types.Union.Type.simple_)
        self.assertEqual(complex_union.simple_.intField, 42)
| |
from __future__ import absolute_import
from django.conf import settings
from zerver.models import get_client, UserProfile
from zerver.lib.response import json_success
from zerver.lib.validator import check_dict
from zerver.decorator import authenticated_api_view, REQ, has_request_variables, to_non_negative_int, flexible_boolean
from zerver.views.messages import send_message_backend
from zerver.lib.webhooks.git import get_push_commits_event_message,\
SUBJECT_WITH_BRANCH_TEMPLATE, get_force_push_commits_event_message, \
get_remove_branch_event_message, get_pull_request_event_message,\
get_issue_event_message, SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\
get_commits_comment_action_message
import logging
import re
import ujson
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Text
from zerver.lib.str_utils import force_str
from django.http import HttpRequest, HttpResponse
# The special repository used for capturing example payloads in production.
ZULIP_TEST_REPO_NAME = 'zulip-test'
ZULIP_TEST_REPO_ID = 6893087

def is_test_repository(repository):
    # type: (Mapping[Text, Any]) -> bool
    """True when *repository* is the Zulip payload-capture test repo."""
    name_matches = repository['name'] == ZULIP_TEST_REPO_NAME
    id_matches = repository['id'] == ZULIP_TEST_REPO_ID
    return name_matches and id_matches
class UnknownEventType(Exception):
    """Raised when a GitHub event name has no handler."""
    pass
def github_pull_request_content(payload):
    # type: (Mapping[Text, Any]) -> Text
    """Build the message body for a pull_request event."""
    pull_request = payload['pull_request']
    action = get_pull_request_or_issue_action(payload)
    sender = payload['sender']['login']
    if action not in ('opened', 'edited'):
        # Short form for state-change actions: no branches or body.
        return get_pull_request_event_message(
            sender,
            action,
            pull_request['html_url'],
            pull_request['number']
        )
    return get_pull_request_event_message(
        sender,
        action,
        pull_request['html_url'],
        pull_request['number'],
        pull_request['head']['ref'],
        pull_request['base']['ref'],
        pull_request['body'],
        get_pull_request_or_issue_assignee(pull_request)
    )
def github_issues_content(payload):
    # type: (Mapping[Text, Any]) -> Text
    """Build the message body for an issues event."""
    issue = payload['issue']
    action = get_pull_request_or_issue_action(payload)
    base_args = [
        payload['sender']['login'],
        action,
        issue['html_url'],
        issue['number'],
    ]
    if action in ('opened', 'edited'):
        # Long form includes the issue body and assignee.
        extra_args = [issue['body'], get_pull_request_or_issue_assignee(issue)]
        return get_issue_event_message(*(base_args + extra_args))
    return get_issue_event_message(*base_args)
def github_object_commented_content(payload, type):
    # type: (Mapping[Text, Any], Text) -> Text
    """Build the message body for a comment on an issue or pull request."""
    comment = payload['comment']
    issue = payload['issue']
    commenter = comment['user']['login']
    action = u'[commented]({}) on'.format(comment['html_url'])
    return get_pull_request_event_message(
        commenter,
        action,
        issue['html_url'],
        issue['number'],
        message=comment['body'],
        type=type
    )
def get_pull_request_or_issue_action(payload):
    # type: (Mapping[Text, Any]) -> Text
    """Normalize GitHub's 'synchronize' action name to 'synchronized'."""
    action = payload['action']
    if action == 'synchronize':
        return 'synchronized'
    return action
def get_pull_request_or_issue_assignee(object_payload):
    # type: (Mapping[Text, Any]) -> Optional[Text]
    """Return the assignee's login, or None when unassigned."""
    assignee = object_payload.get('assignee')
    return assignee.get('login') if assignee else None
def get_pull_request_or_issue_subject(repository, payload_object, type):
    # type: (Mapping[Text, Any], Mapping[Text, Any], Text) -> Text
    """Render the stream topic for a PR or issue message."""
    template_kwargs = dict(
        repo=repository['name'],
        type=type,
        id=payload_object['number'],
        title=payload_object['title'],
    )
    return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(**template_kwargs)
def github_generic_subject(noun, topic_focus, blob):
    # type: (Text, Text, Mapping[Text, Any]) -> Text
    """Render a generic topic; issue and pull_request objects share
    the 'number' and 'title' fields used here."""
    number = blob['number']
    title = blob['title']
    return u'%s: %s %d: %s' % (topic_focus, noun, number, title)
def api_github_v1(user_profile, event, payload, branches, stream, **kwargs):
    # type: (UserProfile, Text, Mapping[Text, Any], Text, Text, **Any) -> Tuple[Text, Text, Text]
    """
    processes github payload with version 1 field specification
    `payload` comes in unmodified from github
    `stream` is set to 'commits' if otherwise unset
    """
    # v1 is v2 with the commit stream fixed to the configured stream and
    # the issue stream fixed to 'issues'.
    return api_github_v2(user_profile, event, payload, branches, stream,
                         commit_stream=stream, issue_stream='issues',
                         **kwargs)
def api_github_v2(user_profile, event, payload, branches, default_stream,
                  commit_stream, issue_stream, topic_focus = None):
    # type: (UserProfile, Text, Mapping[Text, Any], Text, Text, Text, Text, Optional[Text]) -> Tuple[Text, Text, Text]
    """
    processes github payload with version 2 field specification
    `payload` comes in unmodified from github
    `default_stream` is set to what `stream` is in v1 above
    `commit_stream` and `issue_stream` fall back to `default_stream` if they are empty
    This and allowing alternative endpoints is what distinguishes v1 from v2 of the github configuration

    Returns a (stream, topic, content) tuple for the message to send.
    Raises UnknownEventType for events this handler does not know.
    """
    target_stream = commit_stream if commit_stream else default_stream
    issue_stream = issue_stream if issue_stream else default_stream
    repository = payload['repository']
    # Topic defaults to the repository name unless the caller overrides it
    # (e.g. api_github_landing passing the branch name).
    topic_focus = topic_focus if topic_focus else repository['name']
    # Event Handlers
    if event == 'pull_request':
        subject = get_pull_request_or_issue_subject(repository, payload['pull_request'], 'PR')
        content = github_pull_request_content(payload)
    elif event == 'issues':
        # in v1, we assume that this stream exists since it is
        # deprecated and the few realms that use it already have the
        # stream
        target_stream = issue_stream
        subject = get_pull_request_or_issue_subject(repository, payload['issue'], 'Issue')
        content = github_issues_content(payload)
    elif event == 'issue_comment':
        # Comments on both issues and pull requests come in as issue_comment events
        issue = payload['issue']
        if 'pull_request' not in issue or issue['pull_request']['diff_url'] is None:
            # It's an issues comment
            target_stream = issue_stream
            type = 'Issue'
            subject = get_pull_request_or_issue_subject(repository, payload['issue'], type)
        else:
            # It's a pull request comment
            type = 'PR'
            subject = get_pull_request_or_issue_subject(repository, payload['issue'], type)
        content = github_object_commented_content(payload, type)
    elif event == 'push':
        # Force-push and branch-deletion detection happens inside
        # build_message_from_gitlog().
        subject, content = build_message_from_gitlog(user_profile, topic_focus,
                                                     payload['ref'], payload['commits'],
                                                     payload['before'], payload['after'],
                                                     payload['compare'],
                                                     payload['pusher']['name'],
                                                     forced=payload['forced'],
                                                     created=payload['created'])
    elif event == 'commit_comment':
        subject = topic_focus
        comment = payload.get('comment')
        action = u'[commented]({})'.format(comment['html_url'])
        content = get_commits_comment_action_message(
            comment['user']['login'],
            action,
            # Strip the '#commitcomment-...' fragment to get the commit URL.
            comment['html_url'].split('#', 1)[0],
            comment['commit_id'],
            comment['body'],
        )
    else:
        raise UnknownEventType(force_str(u'Event %s is unknown and cannot be handled' % (event,)))
    return target_stream, subject, content
@authenticated_api_view(is_webhook=True)
@has_request_variables
def api_github_landing(request, user_profile, event=REQ(),
                       payload=REQ(validator=check_dict([])),
                       branches=REQ(default=''),
                       stream=REQ(default=''),
                       version=REQ(converter=to_non_negative_int, default=1),
                       commit_stream=REQ(default=''),
                       issue_stream=REQ(default=''),
                       exclude_pull_requests=REQ(converter=flexible_boolean, default=False),
                       exclude_issues=REQ(converter=flexible_boolean, default=False),
                       exclude_commits=REQ(converter=flexible_boolean, default=False),
                       emphasize_branch_in_topic=REQ(converter=flexible_boolean, default=False),
                       ):
    # type: (HttpRequest, UserProfile, Text, Mapping[Text, Any], Text, Text, int, Text, Text, bool, bool, bool, bool) -> HttpResponse
    """Legacy GitHub webhook endpoint: route a GitHub event to a stream
    message, honoring the integration's filtering options."""
    repository = payload['repository']
    # Special hook for capturing event data. If we see our special test repo, log the payload from github.
    try:
        if is_test_repository(repository) and settings.PRODUCTION:
            with open('/var/log/zulip/github-payloads', 'a') as f:
                f.write(ujson.dumps({'event': event,
                                     'payload': payload,
                                     'branches': branches,
                                     'stream': stream,
                                     'version': version,
                                     'commit_stream': commit_stream,
                                     'issue_stream': issue_stream,
                                     'exclude_pull_requests': exclude_pull_requests,
                                     'exclude_issues': exclude_issues,
                                     'exclude_commits': exclude_commits,
                                     'emphasize_branch_in_topic': emphasize_branch_in_topic,
                                     }))
                f.write('\n')
    except Exception:
        # Capturing is best-effort; never fail the webhook over it.
        logging.exception('Error while capturing Github event')
    if not stream:
        stream = 'commits'
    # Branch name with the 'refs/heads/' prefix removed ('' for non-push events).
    short_ref = re.sub(r'^refs/heads/', '', payload.get('ref', ''))
    kwargs = dict()
    if emphasize_branch_in_topic and short_ref:
        kwargs['topic_focus'] = short_ref
    # Build the set of event types this integration instance accepts.
    allowed_events = set()
    if not exclude_pull_requests:
        allowed_events.add('pull_request')
    if not exclude_issues:
        allowed_events.add('issues')
        allowed_events.add('issue_comment')
    if not exclude_commits:
        allowed_events.add('push')
        allowed_events.add('commit_comment')
    if event not in allowed_events:
        return json_success()
    # We filter issue_comment events for issue creation events
    if event == 'issue_comment' and payload['action'] != 'created':
        return json_success()
    if event == 'push':
        # If we are given a whitelist of branches, then we silently ignore
        # any push notification on a branch that is not in our whitelist.
        if branches and short_ref not in re.split('[\s,;|]+', branches):
            return json_success()
    # Map payload to the handler with the right version
    if version == 2:
        target_stream, subject, content = api_github_v2(user_profile, event, payload, branches,
                                                        stream, commit_stream, issue_stream,
                                                        **kwargs)
    else:
        target_stream, subject, content = api_github_v1(user_profile, event, payload, branches,
                                                        stream, **kwargs)
    request.client = get_client('ZulipGitHubWebhook')
    return send_message_backend(request, user_profile,
                                message_type_name='stream',
                                message_to=[target_stream],
                                forged=False, subject_name=subject,
                                message_content=content)
def build_message_from_gitlog(user_profile, name, ref, commits, before, after, url, pusher, forced=None, created=None):
    # type: (UserProfile, Text, Text, List[Dict[str, str]], Text, Text, Text, Text, Optional[Text], Optional[Text]) -> Tuple[Text, Text]
    """Render (topic, content) for a git push notification."""
    short_ref = re.sub(r'^refs/heads/', '', ref)
    subject = SUBJECT_WITH_BRANCH_TEMPLATE.format(repo=name, branch=short_ref)
    # An all-zero 'after' sha means the branch was deleted.
    branch_removed = re.match(r'^0+$', after) is not None
    # 'created' and 'forced' are github flags; the second check is for beanstalk
    force_pushed = (forced and not created) or (forced is None and len(commits) == 0)
    if branch_removed:
        content = get_remove_branch_event_message(pusher, short_ref)
    elif force_pushed:
        content = get_force_push_commits_event_message(pusher, url, short_ref, after[:7])
    else:
        normalized_commits = _transform_commits_list_to_common_format(commits)
        content = get_push_commits_event_message(pusher, url, short_ref, normalized_commits)
    return subject, content
def _transform_commits_list_to_common_format(commits):
# type: (List[Dict[str, Any]]) -> List[Dict[str, str]]
new_commits_list = []
for commit in commits:
new_commits_list.append({
'name': commit.get('committer').get('username'),
'sha': commit.get('id'),
'url': commit.get('url'),
'message': commit.get('message'),
})
return new_commits_list
| |
"""
File Name: UnoPytorch/cl_class_dataset.py
Author: Xiaotian Duan (xduan7)
Email: xduan7@uchicago.edu
Date: 8/13/18
Python Version: 3.6.6
File Description:
This file implements the dataset for cell line classification.
"""
import logging
import torch
import numpy as np
import pandas as pd
import torch.utils.data as data
from sklearn.model_selection import train_test_split
from utils.data_processing.cell_line_dataframes import get_rna_seq_df, \
get_cl_meta_df
from utils.data_processing.label_encoding import encode_int_to_onehot, \
get_label_dict
logger = logging.getLogger(__name__)
class CLClassDataset(data.Dataset):
    """Dataset class for cell line classification

    This class implements a PyTorch Dataset class made for cell line
    classification. Using enumerate() or any other methods that utilize
    __getitem__() to access the data.

    Each data item is made of a tuple of
        (RNA_sequence, conditions, site, type, category)
    where conditions is a list of [data_source, cell_description].

    Note that all categorical labels are numeric, and the encoding
    dictionary can be found in the processed folder.

    Attributes:
        training (bool): indicator of training/validation dataset
        cells (list): list of all the cells in the dataset
        num_cells (int): number of cell lines in the dataset
        rnaseq_dim (int): dimensionality of RNA sequence
    """

    def __init__(
            self,
            data_root: str,
            training: bool,
            rand_state: int = 0,
            summary: bool = True,

            # Data type settings (for storage and data loading)
            int_dtype: type = np.int8,
            float_dtype: type = np.float16,
            output_dtype: type = np.float32,

            # Pre-processing settings
            rnaseq_scaling: str = 'std',

            # Partitioning (train/validation) and data usage settings
            rnaseq_feature_usage: str = 'source_scale',
            validation_ratio: float = 0.2, ):
        """dataset = CLClassDataset('./data/', True)

        Construct a RNA sequence dataset based on the parameters provided.
        The process includes:
            * Downloading source data files;
            * Pre-processing (scaling);
            * Public attributes and other preparations.

        Args:
            data_root (str): path to data root folder.
            training (bool): indicator for training.
            rand_state (int): random seed used for training/validation split
                and other processes that requires randomness.
            summary (bool): set True for printing dataset summary.
            int_dtype (type): integer dtype for data storage in RAM.
            float_dtype (type): float dtype for data storage in RAM.
            output_dtype (type): output dtype for neural network.
            rnaseq_scaling (str): scaling method for RNA sequence. Choose
                between 'none', 'std', and 'minmax'.
            rnaseq_feature_usage: RNA sequence data usage. Choose between
                'source_scale' and 'combat'.
            validation_ratio (float): portion of validation data out of all
                data samples.
        """
        # Initialization ######################################################
        self.__data_root = data_root

        # Class-wise variables
        self.training = training
        self.__rand_state = rand_state
        self.__output_dtype = output_dtype

        # Feature scaling: treat None/'' as an explicit 'none' so the
        # downstream helpers always get a valid lower-case choice.
        if rnaseq_scaling is None or rnaseq_scaling == '':
            rnaseq_scaling = 'none'
        self.__rnaseq_scaling = rnaseq_scaling.lower()

        self.__rnaseq_feature_usage = rnaseq_feature_usage
        self.__validation_ratio = validation_ratio

        # Load all dataframes #################################################
        self.__rnaseq_df = get_rna_seq_df(
            data_root=data_root,
            rnaseq_feature_usage=rnaseq_feature_usage,
            rnaseq_scaling=rnaseq_scaling,
            float_dtype=float_dtype)

        self.__cl_meta_df = get_cl_meta_df(
            data_root=data_root,
            int_dtype=int_dtype)

        # Put all the sequence in one column as list and specify dtype
        # (float_dtype(row) turns each per-cell feature row into one array
        # object stored in the single 'seq' cell)
        self.__rnaseq_df['seq'] = \
            list(map(float_dtype, self.__rnaseq_df.values.tolist()))

        # Join the RNA sequence data with meta data. cl_df will have columns:
        # ['data_src', 'site', 'type', 'category', 'seq']
        # join='inner' keeps only cell lines present in both frames.
        self.__cl_df = pd.concat([self.__cl_meta_df,
                                  self.__rnaseq_df[['seq']]],
                                 axis=1, join='inner')

        # Encode data source from int into one-hot encoding
        num_data_src = len(get_label_dict(data_root, 'data_src_dict.txt'))
        enc_data_src = encode_int_to_onehot(self.__cl_df['data_src'].tolist(),
                                            num_classes=num_data_src)
        self.__cl_df['data_src'] = list(map(int_dtype, enc_data_src))

        # Train/validation split ##############################################
        # After this call self.__cl_df only holds the partition selected by
        # the `training` flag.
        self.__split_drug_resp()

        # Converting dataframes to arrays for rapid access ####################
        self.__cl_array = self.__cl_df.values

        # Public attributes ###################################################
        self.cells = self.__cl_df.index.tolist()
        self.num_cells = self.__cl_df.shape[0]
        self.rnaseq_dim = len(self.__cl_df.iloc[0]['seq'])

        # Clear the dataframes ################################################
        # Only self.__cl_array is needed for __getitem__; drop the frames to
        # free RAM.
        self.__rnaseq_df = None
        self.__cl_meta_df = None
        self.__cl_df = None

        # Dataset summary #####################################################
        if summary:
            print('=' * 80)
            print(('Training' if self.training else 'Validation')
                  + ' RNA Sequence Dataset Summary:')
            print('\t%i Unique Cell Lines (feature dim: %4i).'
                  % (self.num_cells, self.rnaseq_dim))
            print('=' * 80)

    def __len__(self):
        """length = len(cl_class_dataset)

        Get the length of dataset, which is the number of cell lines.

        Returns:
            int: the length of dataset.
        """
        return self.num_cells

    def __getitem__(self, index):
        """rnaseq, data_src, site, type, category = cl_class_dataset[0]

        Args:
            index (int): index for target data slice.

        Returns:
            tuple: a tuple containing the following five elements:
                * RNA sequence data (np.ndarray of float);
                * one-hot-encoded data source (np.ndarray of float);
                * encoded cell line site (int);
                * encoded cell line type (int);
                * encoded cell line category (int)
        """
        # Column order fixed by the concat in __init__:
        # [0]=data_src, [1]=site, [2]=type, [3]=category, [4]=seq
        cl_data = self.__cl_array[index]

        rnaseq = np.asarray(cl_data[4], dtype=self.__output_dtype)
        data_src = np.array(cl_data[0], dtype=self.__output_dtype)
        # Note that PyTorch requires np.int64 for classification labels
        cl_site = np.int64(cl_data[1])
        cl_type = np.int64(cl_data[2])
        cl_category = np.int64(cl_data[3])

        return rnaseq, data_src, cl_site, cl_type, cl_category

    def __split_drug_resp(self):
        """self.__split_drug_resp()

        Split training and validation dataframe for cell lines, stratified
        on tumor type. Note that after the split, our dataframe will only
        contain training/validation data based on training indicator.

        Returns:
            None
        """
        split_kwargs = {
            'test_size': self.__validation_ratio,
            'random_state': self.__rand_state,
            'shuffle': True, }

        try:
            training_cl_df, validation_cl_df = \
                train_test_split(self.__cl_df, **split_kwargs,
                                 stratify=self.__cl_df['type'].tolist())
        except ValueError:
            # Stratification fails when some type has too few samples;
            # fall back to a plain random split.
            logger.warning('Failed to split cell lines in stratified way. '
                           'Splitting randomly ...')
            training_cl_df, validation_cl_df = \
                train_test_split(self.__cl_df, **split_kwargs)

        self.__cl_df = training_cl_df if self.training else validation_cl_df
# Test segment for cell line classification dataset
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    # Smoke test: build the validation split of CLClassDataset and fetch
    # one sample through a DataLoader.
    dataloader = torch.utils.data.DataLoader(
        CLClassDataset(data_root='../../data/',
                       training=False),
        batch_size=512, shuffle=False)

    tmp = dataloader.dataset[0]
| |
"""Support for LaCrosse sensor components."""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.sensor import ENTITY_ID_FORMAT, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_ID,
CONF_NAME,
CONF_SENSORS,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
TEMP_CELSIUS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)

# Platform configuration keys for the JeeLink/serial radio settings.
CONF_BAUD = "baud"
CONF_DATARATE = "datarate"
CONF_EXPIRE_AFTER = "expire_after"
CONF_FREQUENCY = "frequency"
CONF_JEELINK_LED = "led"
CONF_TOGGLE_INTERVAL = "toggle_interval"
CONF_TOGGLE_MASK = "toggle_mask"

DEFAULT_DEVICE = "/dev/ttyUSB0"
DEFAULT_BAUD = "57600"
DEFAULT_EXPIRE_AFTER = 300

# Supported sensor types; each maps to an entity class in TYPE_CLASSES.
TYPES = ["battery", "humidity", "temperature"]

# Validation schema for a single configured sensor entry.
SENSOR_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_ID): cv.positive_int,
        vol.Required(CONF_TYPE): vol.In(TYPES),
        vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
        vol.Optional(CONF_NAME): cv.string,
    }
)

# Platform schema: serial/radio settings plus the per-sensor mapping.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
        vol.Optional(CONF_BAUD, default=DEFAULT_BAUD): cv.string,
        vol.Optional(CONF_DATARATE): cv.positive_int,
        vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
        vol.Optional(CONF_FREQUENCY): cv.positive_int,
        vol.Optional(CONF_JEELINK_LED): cv.boolean,
        vol.Optional(CONF_TOGGLE_INTERVAL): cv.positive_int,
        vol.Optional(CONF_TOGGLE_MASK): cv.positive_int,
    }
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the LaCrosse sensors.

    Opens the JeeLink serial device, applies the optional radio settings
    from the platform config, starts scanning and registers one entity
    per configured sensor.
    """
    # Platform-scoped imports: these third-party dependencies are only
    # needed when this platform is actually set up.
    import pylacrosse
    from serial import SerialException

    usb_device = config.get(CONF_DEVICE)
    baud = int(config.get(CONF_BAUD))
    expire_after = config.get(CONF_EXPIRE_AFTER)

    _LOGGER.debug("%s %s", usb_device, baud)

    try:
        lacrosse = pylacrosse.LaCrosse(usb_device, baud)
        lacrosse.open()
    except SerialException as exc:
        _LOGGER.warning("Unable to open serial port: %s", exc)
        # Returning False tells Home Assistant the platform setup failed.
        return False

    # Release the serial port cleanly on shutdown.
    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lacrosse.close)

    # Optional JeeLink radio settings -- only applied when configured.
    if CONF_JEELINK_LED in config:
        lacrosse.led_mode_state(config.get(CONF_JEELINK_LED))
    if CONF_FREQUENCY in config:
        lacrosse.set_frequency(config.get(CONF_FREQUENCY))
    if CONF_DATARATE in config:
        lacrosse.set_datarate(config.get(CONF_DATARATE))
    if CONF_TOGGLE_INTERVAL in config:
        lacrosse.set_toggle_interval(config.get(CONF_TOGGLE_INTERVAL))
    if CONF_TOGGLE_MASK in config:
        lacrosse.set_toggle_mask(config.get(CONF_TOGGLE_MASK))

    lacrosse.start_scan()

    sensors = []
    for device, device_config in config[CONF_SENSORS].items():
        _LOGGER.debug("%s %s", device, device_config)

        typ = device_config.get(CONF_TYPE)
        sensor_class = TYPE_CLASSES[typ]
        # Fall back to the config slug when no friendly name is given.
        name = device_config.get(CONF_NAME, device)

        sensors.append(
            sensor_class(hass, lacrosse, device, name, expire_after, device_config)
        )

    add_entities(sensors)
class LaCrosseSensor(Entity):
    """Implementation of a Lacrosse sensor.

    Base class for the temperature/humidity/battery entities below; they
    expose one of the measured fields via their ``state`` property.
    """

    _temperature = None
    _humidity = None
    _low_battery = None
    _new_battery = None

    def __init__(self, hass, lacrosse, device_id, name, expire_after, config):
        """Initialize the sensor.

        Args:
            hass: Home Assistant instance.
            lacrosse: pylacrosse connection used to register for updates.
            device_id: config slug used to generate the entity id.
            name: friendly name shown in the frontend.
            expire_after: seconds after which a reading is discarded,
                or None/0 to keep the last value forever.
            config: validated per-sensor configuration (must contain 'id').
        """
        self.hass = hass
        self.entity_id = async_generate_entity_id(
            ENTITY_ID_FORMAT, device_id, hass=hass
        )
        self._config = config
        self._name = name
        self._value = None
        self._expire_after = expire_after
        self._expiration_trigger = None

        lacrosse.register_callback(
            int(self._config["id"]), self._callback_lacrosse, None
        )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {
            "low_battery": self._low_battery,
            "new_battery": self._new_battery,
        }

    def _callback_lacrosse(self, lacrosse_sensor, user_data):
        """Handle a function that is called from pylacrosse with new values."""
        if self._expire_after is not None and self._expire_after > 0:
            # Reset old trigger
            if self._expiration_trigger:
                self._expiration_trigger()
                self._expiration_trigger = None

            # Set new trigger
            expiration_at = dt_util.utcnow() + timedelta(seconds=self._expire_after)
            self._expiration_trigger = async_track_point_in_utc_time(
                self.hass, self.value_is_expired, expiration_at
            )

        self._temperature = lacrosse_sensor.temperature
        self._humidity = lacrosse_sensor.humidity
        self._low_battery = lacrosse_sensor.low_battery
        self._new_battery = lacrosse_sensor.new_battery

    @callback
    def value_is_expired(self, *_):
        """Triggered when value is expired."""
        self._expiration_trigger = None
        # BUG FIX: previously only self._value was cleared here, but no
        # state property reads _value -- the subclasses report
        # _temperature/_humidity/_low_battery -- so expired readings were
        # kept forever.  Clear the measured fields as well so the entity
        # actually reverts to unknown.
        self._value = None
        self._temperature = None
        self._humidity = None
        self._low_battery = None
        self._new_battery = None
        self.async_schedule_update_ha_state()
class LaCrosseTemperature(LaCrosseSensor):
    """Temperature flavour of the LaCrosse sensor."""

    @property
    def state(self):
        """Return the most recent temperature reading."""
        return self._temperature

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return TEMP_CELSIUS
class LaCrosseHumidity(LaCrosseSensor):
    """Humidity flavour of the LaCrosse sensor."""

    @property
    def icon(self):
        """Icon to use in the frontend."""
        return "mdi:water-percent"

    @property
    def state(self):
        """Return the most recent relative-humidity reading."""
        return self._humidity

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement."""
        return "%"
class LaCrosseBattery(LaCrosseSensor):
    """Battery flavour of the LaCrosse sensor."""

    @property
    def state(self):
        """Return 'low'/'ok' for the battery flag, or None when unknown."""
        if self._low_battery is None:
            return None
        return "low" if self._low_battery is True else "ok"

    @property
    def icon(self):
        """Icon to use in the frontend."""
        if self._low_battery is None:
            return "mdi:battery-unknown"
        return "mdi:battery-alert" if self._low_battery is True else "mdi:battery"
# Maps the configured sensor 'type' value to the entity class to create.
TYPE_CLASSES = {
    "temperature": LaCrosseTemperature,
    "humidity": LaCrosseHumidity,
    "battery": LaCrosseBattery,
}
| |
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_vmware import exceptions as vexc
from oslo_vmware import vim_util
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vm_util
class VMwareVifTestCase(test.NoDBTestCase):
    """Tests for the nova.virt.vmwareapi.vif helpers.

    The ensure_vlan_bridge tests use mox record/replay: the expected
    network_util calls are recorded first, then ReplayAll() switches to
    replay mode and the call under test must match the recording exactly.
    """

    def setUp(self):
        super(VMwareVifTestCase, self).setUp()
        self.flags(vlan_interface='vmnet0', group='vmware')
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        vlan=3,
                                        bridge_interface='eth0',
                                        injected=True)
        self._network = network
        self.vif = network_model.NetworkInfo([
            network_model.VIF(id=None,
                              address='DE:AD:BE:EF:00:00',
                              network=network,
                              type=None,
                              devname=None,
                              ovs_interfaceid=None,
                              rxtx_cap=3)
        ])[0]
        self.session = fake.FakeSession()
        self.cluster = None

    def tearDown(self):
        super(VMwareVifTestCase, self).tearDown()

    def test_ensure_vlan_bridge(self):
        # Record: network missing, so the port group is created on the
        # vswitch of the configured vlan_interface with vlan id 3.
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        network_util.get_network_with_the_name(self.session, 'fa0',
                                               self.cluster).AndReturn(None)
        network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
                                                    self.cluster).AndReturn('vmnet0')
        network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
                                                    self.cluster).AndReturn(True)
        network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
                                       self.cluster)
        network_util.get_network_with_the_name(self.session, 'fa0', None)

        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)

    # FlatDHCP network mode without vlan - network doesn't exist with the host
    def test_ensure_vlan_bridge_without_vlan(self):
        # Same as above but create_vlan=False, so vlan id 0 is used.
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        network_util.get_network_with_the_name(self.session, 'fa0',
                                               self.cluster).AndReturn(None)
        network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
                                                    self.cluster).AndReturn('vmnet0')
        network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
                                                    self.cluster).AndReturn(True)
        network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
                                       self.cluster)
        network_util.get_network_with_the_name(self.session, 'fa0', None)

        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)

    # FlatDHCP network mode without vlan - network exists with the host
    # Get vswitch and check vlan interface should not be called
    def test_ensure_vlan_bridge_with_network(self):
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        vm_network = {'name': 'VM Network', 'type': 'Network'}
        network_util.get_network_with_the_name(self.session, 'fa0',
                                               self.cluster).AndReturn(vm_network)
        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)

    # Flat network mode with DVS
    def test_ensure_vlan_bridge_with_existing_dvs(self):
        network_ref = {'dvpg': 'dvportgroup-2062',
                       'type': 'DistributedVirtualPortgroup'}
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
                                 'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
                                 'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        network_util.get_network_with_the_name(self.session, 'fa0',
                                               self.cluster).AndReturn(network_ref)
        self.mox.ReplayAll()
        ref = vif.ensure_vlan_bridge(self.session,
                                     self.vif,
                                     create_vlan=False)
        self.assertThat(ref, matchers.DictMatches(network_ref))

    def test_get_network_ref_flat_dhcp(self):
        self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
        vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
                               create_vlan=False)
        self.mox.ReplayAll()
        vif.get_network_ref(self.session, self.cluster, self.vif, False)

    def test_get_network_ref_bridge(self):
        # should_create_vlan=True on the network must result in
        # ensure_vlan_bridge being called with create_vlan=True.
        self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
        vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
                               create_vlan=True)
        self.mox.ReplayAll()
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        vlan=3,
                                        bridge_interface='eth0',
                                        injected=True,
                                        should_create_vlan=True)
        self.vif = network_model.NetworkInfo([
            network_model.VIF(id=None,
                              address='DE:AD:BE:EF:00:00',
                              network=network,
                              type=None,
                              devname=None,
                              ovs_interfaceid=None,
                              rxtx_cap=3)
        ])[0]
        vif.get_network_ref(self.session, self.cluster, self.vif, False)

    def test_create_port_group_already_exists(self):
        # AlreadyExistsException from AddPortGroup must be swallowed.
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'AddPortGroup':
                raise vexc.AlreadyExistsException()

        with test.nested(
            mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
            mock.patch.object(vm_util, 'get_host_ref'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_add_vswitch, _get_host, _call_method):
            network_util.create_port_group(self.session, 'pg_name',
                                           'vswitch_name', vlan_id=0,
                                           cluster=None)

    def test_create_port_group_exception(self):
        # Any other VMwareDriverException must propagate.
        def fake_call_method(module, method, *args, **kwargs):
            if method == 'AddPortGroup':
                raise vexc.VMwareDriverException()

        with test.nested(
            mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
            mock.patch.object(vm_util, 'get_host_ref'),
            mock.patch.object(self.session, '_call_method',
                              fake_call_method)
        ) as (_add_vswitch, _get_host, _call_method):
            self.assertRaises(vexc.VMwareDriverException,
                              network_util.create_port_group,
                              self.session, 'pg_name',
                              'vswitch_name', vlan_id=0,
                              cluster=None)

    def test_get_vif_info_none(self):
        vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
                                    'is_neutron', 'fake_model', None)
        self.assertEqual([], vif_info)

    def test_get_vif_info_empty_list(self):
        vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
                                    'is_neutron', 'fake_model', [])
        self.assertEqual([], vif_info)

    @mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
    def test_get_vif_info(self, mock_get_network_ref):
        network_info = utils.get_test_network_info()
        vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
                                    'is_neutron', 'fake_model', network_info)
        expected = [{'iface_id': 'vif-xxx-yyy-zzz',
                     'mac_address': 'fake',
                     'network_name': 'fake',
                     'network_ref': 'fake_ref',
                     'vif_model': 'fake_model'}]
        self.assertEqual(expected, vif_info)

    @mock.patch.object(vif, '_check_ovs_supported_version')
    def test_get_neutron_network_ovs_integration_bridge(self,
                                                        mock_check):
        self.flags(integration_bridge='fake-bridge-id', group='vmware')
        vif_info = network_model.NetworkInfo([
            network_model.VIF(type=network_model.VIF_TYPE_OVS,
                              address='DE:AD:BE:EF:00:00',
                              network=self._network)]
        )[0]
        network_ref = vif._get_neutron_network('fake-session',
                                               'fake-cluster',
                                               vif_info)
        expected_ref = {'type': 'OpaqueNetwork',
                        'network-id': 'fake-bridge-id',
                        'network-type': 'opaque',
                        'use-external-id': False}
        self.assertEqual(expected_ref, network_ref)
        mock_check.assert_called_once_with('fake-session')

    @mock.patch.object(vif, '_check_ovs_supported_version')
    def test_get_neutron_network_ovs(self, mock_check):
        vif_info = network_model.NetworkInfo([
            network_model.VIF(type=network_model.VIF_TYPE_OVS,
                              address='DE:AD:BE:EF:00:00',
                              network=self._network)]
        )[0]
        network_ref = vif._get_neutron_network('fake-session',
                                               'fake-cluster',
                                               vif_info)
        expected_ref = {'type': 'OpaqueNetwork',
                        'network-id': 0,
                        'network-type': 'nsx.LogicalSwitch',
                        'use-external-id': True}
        self.assertEqual(expected_ref, network_ref)
        mock_check.assert_called_once_with('fake-session')

    @mock.patch.object(vif, '_check_ovs_supported_version')
    def test_get_neutron_network_ovs_logical_switch_id(self, mock_check):
        vif_info = network_model.NetworkInfo([
            network_model.VIF(type=network_model.VIF_TYPE_OVS,
                              address='DE:AD:BE:EF:00:00',
                              network=self._network,
                              details={'nsx-logical-switch-id':
                                       'fake-nsx-id'})]
        )[0]
        network_ref = vif._get_neutron_network('fake-session',
                                               'fake-cluster',
                                               vif_info)
        expected_ref = {'type': 'OpaqueNetwork',
                        'network-id': 'fake-nsx-id',
                        'network-type': 'nsx.LogicalSwitch',
                        'use-external-id': True}
        self.assertEqual(expected_ref, network_ref)
        mock_check.assert_called_once_with('fake-session')

    @mock.patch.object(network_util, 'get_network_with_the_name')
    def test_get_neutron_network_dvs(self, mock_network_name):
        fake_network_obj = {'type': 'DistributedVirtualPortgroup',
                            'dvpg': 'fake-key',
                            'dvsw': 'fake-props'}
        mock_network_name.return_value = fake_network_obj
        vif_info = network_model.NetworkInfo([
            network_model.VIF(type=network_model.VIF_TYPE_DVS,
                              address='DE:AD:BE:EF:00:00',
                              network=self._network)]
        )[0]
        network_ref = vif._get_neutron_network('fake-session',
                                               'fake-cluster',
                                               vif_info)
        mock_network_name.assert_called_once_with('fake-session',
                                                  'fa0',
                                                  'fake-cluster')
        self.assertEqual(fake_network_obj, network_ref)

    @mock.patch.object(network_util, 'get_network_with_the_name')
    def test_get_neutron_network_dvs_vif_details(self, mock_network_name):
        # When vif details carry dvs_port_group_name, that name is used
        # for the lookup instead of the network bridge.
        fake_network_obj = {'type': 'DistributedVirtualPortgroup',
                            'dvpg': 'pg1',
                            'dvsw': 'fake-props'}
        mock_network_name.return_value = fake_network_obj
        vif_info = network_model.NetworkInfo([
            network_model.VIF(type=network_model.VIF_TYPE_DVS,
                              details={'dvs_port_key': 'key1',
                                       'dvs_port_group_name': 'pg1'},
                              address='DE:AD:BE:EF:00:00',
                              network=self._network)])[0]
        network_ref = vif._get_neutron_network('fake-session',
                                               'fake-cluster',
                                               vif_info)
        mock_network_name.assert_called_once_with('fake-session',
                                                  'pg1',
                                                  'fake-cluster')
        self.assertEqual(fake_network_obj, network_ref)

    @mock.patch.object(network_util, 'get_network_with_the_name',
                       return_value=None)
    def test_get_neutron_network_dvs_no_match(self, mock_network_name):
        vif_info = network_model.NetworkInfo([
            network_model.VIF(type=network_model.VIF_TYPE_DVS,
                              address='DE:AD:BE:EF:00:00',
                              network=self._network)]
        )[0]
        self.assertRaises(exception.NetworkNotFoundForBridge,
                          vif._get_neutron_network,
                          'fake-session',
                          'fake-cluster',
                          vif_info)

    def test_get_neutron_network_invalid_type(self):
        vif_info = network_model.NetworkInfo([
            network_model.VIF(address='DE:AD:BE:EF:00:00',
                              network=self._network)]
        )[0]
        self.assertRaises(exception.InvalidInput,
                          vif._get_neutron_network,
                          'fake-session',
                          'fake-cluster',
                          vif_info)

    @mock.patch.object(vif.LOG, 'warning')
    @mock.patch.object(vim_util, 'get_vc_version',
                       return_value='5.0.0')
    def test_check_invalid_ovs_version(self, mock_version, mock_warning):
        vif._check_ovs_supported_version('fake_session')
        # assert that the min version is in a warning message
        expected_arg = {'version': constants.MIN_VC_OVS_VERSION}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)
| |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import absolute_import, unicode_literals
import copy
import inspect
import json
import os
import sys
from types import GeneratorType
import pytest
import logging
import six
from .fake_api import openshift, osbs
from osbs.build.manipulate import DockJsonManipulator
from osbs.build.build_response import BuildResponse
from osbs.build.build_request import BuildManager, BuildRequest
from osbs.build.build_request import SimpleBuild, ProductionBuild, ProductionWithoutKojiBuild
from osbs.build.spec import BuildIDParam
from osbs.constants import BUILD_FINISHED_STATES
from osbs.constants import SIMPLE_BUILD_TYPE, PROD_BUILD_TYPE, PROD_WITHOUT_KOJI_BUILD_TYPE
from osbs.constants import PROD_WITH_SECRET_BUILD_TYPE
from osbs.exceptions import OsbsValidationException
from osbs.http import Response
from tests.constants import TEST_BUILD, TEST_LABEL, TEST_LABEL_VALUE
from tests.fake_api import ResponseMapping, DEFINITION
logger = logging.getLogger("osbs.tests")
class NoSuchPluginException(Exception):
    """Raised by get_plugin() when the named plugin is not configured."""
    pass
def get_plugin(plugins, plugin_type, plugin_name):
    """Return the plugin dict named *plugin_name* under *plugin_type*.

    Raises NoSuchPluginException when no matching plugin exists.
    """
    for candidate in plugins[plugin_type]:
        if candidate["name"] == plugin_name:
            return candidate
    raise NoSuchPluginException()
def plugin_value_get(plugins, plugin_type, plugin_name, *args):
    """Look up a plugin and walk into its dict along the *args* keys."""
    node = get_plugin(plugins, plugin_type, plugin_name)
    for key in args:
        node = node[key]
    return node
def test_set_labels_on_build(openshift):
    """Setting a label on a build should yield a JSON response body."""
    response = openshift.set_labels_on_build(TEST_BUILD, {TEST_LABEL: TEST_LABEL_VALUE})
    assert response.json() is not None
def test_get_oauth_token(openshift):
    """An OAuth token should be retrievable from the fake API."""
    assert openshift.get_oauth_token() is not None
def test_list_builds(openshift):
    """Listing builds should return a non-empty JSON payload."""
    build_list = openshift.list_builds()
    assert build_list is not None
    # there must be at least one build in the fake API response
    assert bool(build_list.json())
#####
#
# build/DockJsonManipulator
#
#####
# Minimal OpenShift v1beta1 Build object used as a test fixture; the
# "{{...}}" placeholders are substituted when a build request is rendered.
BUILD_JSON = {
    "metadata": {
        "name": "{{NAME}}"
    },
    "kind": "Build",
    "apiVersion": "v1beta1",
    "parameters": {
        "source": {
            "type": "Git",
            "git": {
                "uri": "{{GIT_URI}}"
            }
        },
        "strategy": {
            "type": "Custom",
            "customStrategy": {
                "image": "buildroot",
                "exposeDockerSocket": True,
                # DOCK_PLUGINS holds the JSON-encoded inner plugin config.
                "env": [{
                    "name": "DOCK_PLUGINS",
                    "value": "TBD"
                }]
            }
        },
        "output": {
            "imageTag": "{{OUTPUT_IMAGE_TAG}}",
            "registry": "{{REGISTRY_URI}}"
        }
    }
}

# Inner dock-build plugin configuration fixture consumed by
# DockJsonManipulator in the tests below.
INNER_DOCK_JSON = {
    "prebuild_plugins": [
        {
            "name": "change_from_in_dockerfile"
        },
        {
            "args": {
                "key1": {
                    "a": "1",
                    "b": "2"
                },
                "key2": "b"
            },
            "name": "a_plugin"
        },
    ],
    "postbuild_plugins": [
        {
            "args": {
                "image_id": "BUILT_IMAGE_ID"
            },
            "name": "all_rpm_packages"
        },
    ]
}
def test_manipulator():
    """DockJsonManipulator should be constructible from the fixtures."""
    assert DockJsonManipulator(BUILD_JSON, INNER_DOCK_JSON) is not None
def test_manipulator_remove_plugin():
    """remove_plugin() must delete the named plugin from the inner config."""
    inner = copy.deepcopy(INNER_DOCK_JSON)
    m = DockJsonManipulator(BUILD_JSON, inner)
    m.remove_plugin("postbuild_plugins", "all_rpm_packages")
    # BUG FIX: the original assertion looked up the key "all_rpm_packages"
    # in each plugin dict, which never exists (the plugin name is stored
    # under "name"), so the test passed even when nothing was removed.
    # Check the plugin names instead.
    assert all(x["name"] != "all_rpm_packages" for x in inner["postbuild_plugins"])
def test_manipulator_remove_nonexisting_plugin():
    """Removing a plugin that is not configured must not raise."""
    manipulator = DockJsonManipulator(BUILD_JSON, copy.deepcopy(INNER_DOCK_JSON))
    manipulator.remove_plugin("postbuild_plugins", "this-doesnt-exist")
def test_manipulator_get_dock_json():
    """get_dock_json() should decode the JSON stored in DOCK_PLUGINS."""
    build_json = copy.deepcopy(BUILD_JSON)
    inner = {
        "a": "b"
    }
    env_entries = build_json['parameters']['strategy']['customStrategy']['env']
    dock_plugins = [entry for entry in env_entries
                    if entry["name"] == "DOCK_PLUGINS"]
    dock_plugins[0]['value'] = json.dumps(inner)
    response = DockJsonManipulator(build_json, None).get_dock_json()
    assert response["a"] == inner["a"]
def test_manipulator_get_dock_json_missing_input():
    """get_dock_json() must raise when the env section is missing."""
    build_json = copy.deepcopy(BUILD_JSON)
    build_json['parameters']['strategy']['customStrategy']['env'] = None
    manipulator = DockJsonManipulator(build_json, None)
    with pytest.raises(RuntimeError):
        manipulator.get_dock_json()
def test_manipulator_merge():
    """dock_json_merge_arg() merges new keys into an existing arg dict."""
    inner = copy.deepcopy(INNER_DOCK_JSON)
    target = next(x for x in inner['prebuild_plugins'] if x["name"] == "a_plugin")
    DockJsonManipulator(None, inner).dock_json_merge_arg(
        "prebuild_plugins", "a_plugin", "key1", {"a": '3', "z": '9'})
    merged = target['args']['key1']
    assert merged['a'] == '3'   # overwritten
    assert merged['b'] == '2'   # untouched
    assert merged['z'] == '9'   # added
def test_render_simple_request():
    """Render a 'simple' build request and verify the produced JSON."""
    # Locate the bundled input templates relative to this test file.
    # BUG FIX: this previously used inspect.getfile(test_render_prod_request),
    # which only worked by accident because both functions live in the same
    # module; reference this function itself instead.
    this_file = inspect.getfile(test_render_simple_request)
    this_dir = os.path.dirname(this_file)
    parent_dir = os.path.dirname(this_dir)
    inputs_path = os.path.join(parent_dir, "inputs")
    bm = BuildManager(inputs_path)
    # Use the imported constant rather than a bare string, consistent with
    # the prod tests below.
    build_request = bm.get_build_request_by_type(SIMPLE_BUILD_TYPE)
    kwargs = {
        'git_uri': "http://git/",
        'git_ref': "master",
        'user': "john-foo",
        'component': "component",
        'registry_uri': "registry.example.com",
        'openshift_uri': "http://openshift/",
    }
    build_request.set_params(**kwargs)
    build_json = build_request.render()

    assert build_json["metadata"]["name"].startswith("component-")
    assert build_json["parameters"]["source"]['git']['uri'] == "http://git/"
    assert build_json["parameters"]["source"]['git']['ref'] == "master"
    assert build_json["parameters"]["output"]['registry'] == "registry.example.com"
    assert build_json["parameters"]["output"]['imageTag'].startswith(
        "john-foo/component:"
    )

    # The plugin configuration is serialized into the DOCK_PLUGINS env var.
    env_vars = build_json['parameters']['strategy']['customStrategy']['env']
    plugins_json = None
    for d in env_vars:
        if d['name'] == 'DOCK_PLUGINS':
            plugins_json = d['value']
            break
    assert plugins_json is not None

    plugins = json.loads(plugins_json)
    assert plugin_value_get(plugins, "postbuild_plugins", "store_metadata_in_osv3", "args", "url") == \
        "http://openshift/"
def test_render_prod_request_with_repo():
    """Render a prod build request carrying a yum repo URL.

    With 'yum_repourls' set, the koji plugin must be absent and the
    add_yum_repo_by_url plugin must carry the repo list.
    """
    # Locate the bundled input templates relative to this test file.
    # BUG FIX: previously pointed at test_render_prod_request; same module,
    # so it only worked by accident.
    this_file = inspect.getfile(test_render_prod_request_with_repo)
    this_dir = os.path.dirname(this_file)
    parent_dir = os.path.dirname(this_dir)
    inputs_path = os.path.join(parent_dir, "inputs")
    bm = BuildManager(inputs_path)
    build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
    kwargs = {
        'git_uri': "http://git/",
        'git_ref': "master",
        'user': "john-foo",
        'component': "component",
        'registry_uri': "registry.example.com",
        'openshift_uri': "http://openshift/",
        'koji_target': "koji-target",
        'kojiroot': "http://root/",
        'kojihub': "http://hub/",
        'sources_command': "make",
        'architecture': "x86_64",
        'vendor': "Foo Vendor",
        'build_host': "our.build.host.example.com",
        'authoritative_registry': "registry.example.com",
        'yum_repourls': ["http://example.com/my.repo"],
    }
    build_request.set_params(**kwargs)
    build_json = build_request.render()

    assert build_json["metadata"]["name"].startswith("component-")
    assert build_json["parameters"]["source"]['git']['uri'] == "http://git/"
    assert build_json["parameters"]["source"]['git']['ref'] == "master"
    assert build_json["parameters"]["output"]['registry'] == "registry.example.com"
    assert build_json["parameters"]["output"]['imageTag'].startswith(
        "john-foo/component:"
    )

    env_vars = build_json['parameters']['strategy']['customStrategy']['env']
    plugins_json = None
    for d in env_vars:
        if d['name'] == 'DOCK_PLUGINS':
            plugins_json = d['value']
            break
    assert plugins_json is not None

    plugins = json.loads(plugins_json)
    assert plugin_value_get(plugins, "prebuild_plugins", "distgit_fetch_artefacts", "args", "command") == "make"
    assert plugin_value_get(plugins, "prebuild_plugins", "change_source_registry", "args", "registry_uri") == \
        "registry.example.com"
    assert plugin_value_get(plugins, "postbuild_plugins", "tag_by_labels", "args", "registry_uri") == \
        "registry.example.com"
    assert plugin_value_get(plugins, "postbuild_plugins", "store_metadata_in_osv3", "args", "url") == \
        "http://openshift/"
    with pytest.raises(NoSuchPluginException):
        get_plugin(plugins, "prebuild_plugins", "koji")
    # BUG FIX: this comparison previously lacked its 'assert', so a wrong
    # repourls value could never fail the test.
    assert plugin_value_get(plugins, "prebuild_plugins", "add_yum_repo_by_url", "args", "repourls") == \
        ["http://example.com/my.repo"]
    labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile", "args", "labels")
    assert labels is not None
    assert labels['Architecture'] is not None
    assert labels['Authoritative_Registry'] is not None
    assert labels['Build_Host'] is not None
    assert labels['Vendor'] is not None
def test_render_prod_request():
    """Render a PROD-type build request and validate the resulting JSON.

    Verifies the build name, git source, output image parameters, and that
    the DOCK_PLUGINS strategy environment variable carries the expected
    prebuild/postbuild plugin configuration, including the koji plugin args.
    """
    # Resolve the "inputs" directory (build request templates) relative to
    # this test file: it lives next to the tests directory.
    this_file = inspect.getfile(test_render_prod_request)
    this_dir = os.path.dirname(this_file)
    parent_dir = os.path.dirname(this_dir)
    inputs_path = os.path.join(parent_dir, "inputs")
    bm = BuildManager(inputs_path)
    build_request = bm.get_build_request_by_type(PROD_BUILD_TYPE)
    kwargs = {
        'git_uri': "http://git/",
        'git_ref': "master",
        'user': "john-foo",
        'component': "component",
        'registry_uri': "registry.example.com",
        'openshift_uri': "http://openshift/",
        'koji_target': "koji-target",
        'kojiroot': "http://root/",
        'kojihub': "http://hub/",
        'sources_command': "make",
        'architecture': "x86_64",
        'vendor': "Foo Vendor",
        'build_host': "our.build.host.example.com",
        'authoritative_registry': "registry.example.com",
    }
    build_request.set_params(**kwargs)
    build_json = build_request.render()
    assert build_json["metadata"]["name"].startswith("component-")
    assert build_json["parameters"]["source"]['git']['uri'] == "http://git/"
    assert build_json["parameters"]["source"]['git']['ref'] == "master"
    assert build_json["parameters"]["output"]['registry'] == "registry.example.com"
    assert build_json["parameters"]["output"]['imageTag'].startswith(
        "john-foo/component:"
    )
    # The plugin configuration is serialized as JSON in the DOCK_PLUGINS
    # environment variable of the custom build strategy.
    env_vars = build_json['parameters']['strategy']['customStrategy']['env']
    plugins_json = None
    for d in env_vars:
        if d['name'] == 'DOCK_PLUGINS':
            plugins_json = d['value']
            break
    assert plugins_json is not None
    plugins = json.loads(plugins_json)
    assert plugin_value_get(plugins, "prebuild_plugins", "distgit_fetch_artefacts", "args", "command") == "make"
    assert plugin_value_get(plugins, "prebuild_plugins", "change_source_registry", "args", "registry_uri") == \
        "registry.example.com"
    assert plugin_value_get(plugins, "postbuild_plugins", "tag_by_labels", "args", "registry_uri") == \
        "registry.example.com"
    assert plugin_value_get(plugins, "postbuild_plugins", "store_metadata_in_osv3", "args", "url") == \
        "http://openshift/"
    # The prod (with koji) build type must configure the koji plugin.
    assert plugin_value_get(plugins, "prebuild_plugins", "koji", "args", "root") == "http://root/"
    assert plugin_value_get(plugins, "prebuild_plugins", "koji", "args", "target") == "koji-target"
    assert plugin_value_get(plugins, "prebuild_plugins", "koji", "args", "hub") == "http://hub/"
    labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile", "args", "labels")
    assert labels is not None
    assert labels['Architecture'] is not None
    assert labels['Authoritative_Registry'] is not None
    assert labels['Build_Host'] is not None
    assert labels['Vendor'] is not None
def test_render_prod_without_koji_request():
    """Render a prod-without-koji build request and validate the JSON.

    Same shape of checks as the prod build type, except that no koji
    parameters are supplied and no koji plugin assertions are made.
    """
    # Fix: resolve the path from *this* function instead of
    # test_render_prod_request. Both resolve to the same file, but the
    # self-reference keeps the test independent of its siblings.
    this_file = inspect.getfile(test_render_prod_without_koji_request)
    this_dir = os.path.dirname(this_file)
    parent_dir = os.path.dirname(this_dir)
    inputs_path = os.path.join(parent_dir, "inputs")
    bm = BuildManager(inputs_path)
    build_request = bm.get_build_request_by_type(PROD_WITHOUT_KOJI_BUILD_TYPE)
    kwargs = {
        'git_uri': "http://git/",
        'git_ref': "master",
        'user': "john-foo",
        'component': "component",
        'registry_uri': "registry.example.com",
        'openshift_uri': "http://openshift/",
        'sources_command': "make",
        'architecture': "x86_64",
        'vendor': "Foo Vendor",
        'build_host': "our.build.host.example.com",
        'authoritative_registry': "registry.example.com",
    }
    build_request.set_params(**kwargs)
    build_json = build_request.render()
    assert build_json["metadata"]["name"].startswith("component-")
    assert build_json["parameters"]["source"]['git']['uri'] == "http://git/"
    assert build_json["parameters"]["source"]['git']['ref'] == "master"
    assert build_json["parameters"]["output"]['registry'] == "registry.example.com"
    assert build_json["parameters"]["output"]['imageTag'].startswith(
        "john-foo/component:"
    )
    # Plugin configuration travels as JSON in the DOCK_PLUGINS env var.
    env_vars = build_json['parameters']['strategy']['customStrategy']['env']
    plugins_json = None
    for d in env_vars:
        if d['name'] == 'DOCK_PLUGINS':
            plugins_json = d['value']
            break
    assert plugins_json is not None
    plugins = json.loads(plugins_json)
    assert plugin_value_get(plugins, "prebuild_plugins", "distgit_fetch_artefacts", "args", "command") == "make"
    assert plugin_value_get(plugins, "prebuild_plugins", "change_source_registry", "args", "registry_uri") == \
        "registry.example.com"
    assert plugin_value_get(plugins, "postbuild_plugins", "tag_by_labels", "args", "registry_uri") == \
        "registry.example.com"
    assert plugin_value_get(plugins, "postbuild_plugins", "store_metadata_in_osv3", "args", "url") == \
        "http://openshift/"
    labels = plugin_value_get(plugins, "prebuild_plugins", "add_labels_in_dockerfile", "args", "labels")
    assert labels is not None
    assert labels['Architecture'] is not None
    assert labels['Authoritative_Registry'] is not None
    assert labels['Build_Host'] is not None
    assert labels['Vendor'] is not None
def test_render_prod_with_secret_request():
    """Render a prod-with-secret build request.

    Checks that the supplied source secret name is propagated into the
    rendered build JSON under parameters.source.sourceSecretName.
    """
    # Fix: resolve the path from *this* function instead of
    # test_render_prod_request (same file, but self-reference is the
    # consistent pattern used by the other tests).
    this_file = inspect.getfile(test_render_prod_with_secret_request)
    this_dir = os.path.dirname(this_file)
    parent_dir = os.path.dirname(this_dir)
    inputs_path = os.path.join(parent_dir, "inputs")
    bm = BuildManager(inputs_path)
    build_request = bm.get_build_request_by_type(PROD_WITH_SECRET_BUILD_TYPE)
    kwargs = {
        'git_uri': "http://git/",
        'git_ref': "master",
        'user': "john-foo",
        'component': "component",
        # Empty registry_uri: this build type pushes via the pulp registry.
        'registry_uri': "",
        'pulp_registry': "registry.example.com",
        'nfs_server_path': "server:path",
        'openshift_uri': "http://openshift/",
        'koji_target': "koji-target",
        'kojiroot': "http://root/",
        'kojihub': "http://hub/",
        'sources_command': "make",
        'architecture': "x86_64",
        'vendor': "Foo Vendor",
        'build_host': "our.build.host.example.com",
        'authoritative_registry': "registry.example.com",
        'source_secret': 'mysecret',
    }
    build_request.set_params(**kwargs)
    build_json = build_request.render()
    assert build_json["parameters"]["source"]["sourceSecretName"] == "mysecret"
def test_render_with_yum_repourls():
    """Validate handling of the yum_repourls build parameter.

    A non-list value must raise OsbsValidationException; a valid list must
    surface as the args of the add_yum_repo_by_url prebuild plugin.
    """
    # Fix: resolve the path from *this* function instead of
    # test_render_prod_request (same file; self-reference for consistency).
    this_file = inspect.getfile(test_render_with_yum_repourls)
    this_dir = os.path.dirname(this_file)
    parent_dir = os.path.dirname(this_dir)
    inputs_path = os.path.join(parent_dir, "inputs")
    bm = BuildManager(inputs_path)
    kwargs = {
        'git_uri': "http://git/",
        'git_ref': "master",
        'user': "john-foo",
        'component': "component",
        'registry_uri': "registry.example.com",
        'openshift_uri': "http://openshift/",
        'koji_target': "koji-target",
        'kojiroot': "http://root/",
        'kojihub': "http://hub/",
        'sources_command': "make",
        'architecture': "x86_64",
        'vendor': "Foo Vendor",
        'build_host': "our.build.host.example.com",
        'authoritative_registry': "registry.example.com",
    }
    build_request = bm.get_build_request_by_type("prod")
    # Test validation for yum_repourls parameter: anything but a list is
    # rejected at set_params time.
    kwargs['yum_repourls'] = 'should be a list'
    with pytest.raises(OsbsValidationException):
        build_request.set_params(**kwargs)
    # Use a valid yum_repourls parameter and check the result
    kwargs['yum_repourls'] = ['http://example.com/repo1.repo',
                              'http://example.com/repo2.repo']
    build_request.set_params(**kwargs)
    build_json = build_request.render()
    strategy = build_json['parameters']['strategy']['customStrategy']['env']
    plugins_json = None
    for d in strategy:
        if d['name'] == 'DOCK_PLUGINS':
            plugins_json = d['value']
            break
    assert plugins_json is not None
    plugins = json.loads(plugins_json)
    repourls = None
    for d in plugins['prebuild_plugins']:
        if d['name'] == 'add_yum_repo_by_url':
            repourls = d['args']['repourls']
            # Fix: stop at the first match instead of scanning the rest of
            # the plugin list (plugin names are unique per phase).
            break
    assert repourls is not None
    assert len(repourls) == 2
    assert 'http://example.com/repo1.repo' in repourls
    assert 'http://example.com/repo2.repo' in repourls
def test_get_user(openshift):
    """get_user() must return an HTTP response with a JSON body."""
    l = openshift.get_user()
    assert l.json() is not None
def test_watch_build(openshift):
    """Waiting for a build must return it in a finished state."""
    response = openshift.wait_for_build_to_finish(TEST_BUILD)
    status_lower = response["status"].lower()
    assert response["metadata"]["name"] == TEST_BUILD
    assert status_lower in BUILD_FINISHED_STATES
    # Names and statuses must be text (not bytes) on both py2 and py3.
    assert isinstance(TEST_BUILD, six.text_type)
    assert isinstance(status_lower, six.text_type)
def test_create_build(openshift):
    """Creating a build must return the created, finished build object."""
    response = openshift.create_build({})
    assert response is not None
    assert response.json()["metadata"]["name"] == TEST_BUILD
    assert response.json()["status"].lower() in BUILD_FINISHED_STATES
## API tests (osbs.api.OSBS)
def test_list_builds_api(osbs):
    """list_builds() must return a non-empty list of BuildResponse."""
    response_list = osbs.list_builds()
    # We should get a response
    assert response_list is not None
    assert len(response_list) > 0
    # response_list is a list of BuildResponse objects
    assert isinstance(response_list[0], BuildResponse)
def test_wait_for_build_to_finish(osbs):
    """The finished build is wrapped in a BuildResponse."""
    build_response = osbs.wait_for_build_to_finish(TEST_BUILD)
    assert isinstance(build_response, BuildResponse)
def test_get_build_api(osbs):
    response = osbs.get_build(TEST_BUILD)
    # We should get a BuildResponse
    assert isinstance(response, BuildResponse)
def test_get_build_request_api(osbs):
    """Each build type string must map to its BuildRequest subclass."""
    build = osbs.get_build_request()
    assert isinstance(build, BuildRequest)
    simple = osbs.get_build_request(SIMPLE_BUILD_TYPE)
    assert isinstance(simple, SimpleBuild)
    prod = osbs.get_build_request(PROD_BUILD_TYPE)
    assert isinstance(prod, ProductionBuild)
    prodwithoutkoji = osbs.get_build_request(PROD_WITHOUT_KOJI_BUILD_TYPE)
    assert isinstance(prodwithoutkoji, ProductionWithoutKojiBuild)
def test_set_labels_on_build_api(osbs):
    """Setting labels must return the raw HTTP Response object."""
    labels = {'label1': 'value1', 'label2': 'value2'}
    response = osbs.set_labels_on_build(TEST_BUILD, labels)
    assert isinstance(response, Response)
def test_get_token_api(osbs):
    # Tokens are raw bytes, not decoded text.
    assert isinstance(osbs.get_token(), bytes)
def test_get_user_api(osbs):
    # The user document carries at least a fullName field.
    assert 'fullName' in osbs.get_user()
def test_build_logs_api(osbs):
    """Non-follow logs come back as a single text/bytes blob."""
    logs = osbs.get_build_logs(TEST_BUILD)
    assert isinstance(logs, tuple(list(six.string_types) + [bytes]))
    assert logs == b"line 1"
def test_build_logs_api_follow(osbs):
    """follow=True must return a generator yielding decoded lines."""
    logs = osbs.get_build_logs(TEST_BUILD, follow=True)
    assert isinstance(logs, GeneratorType)
    assert next(logs) == "line 1"
    with pytest.raises(StopIteration):
        assert next(logs)
@pytest.mark.parametrize('decode_docker_logs', [True, False])
def test_build_logs_api_from_docker(osbs, decode_docker_logs):
    """Docker build logs must contain docker's "Step N" markers."""
    logs = osbs.get_docker_build_logs(TEST_BUILD, decode_logs=decode_docker_logs)
    assert isinstance(logs, tuple(list(six.string_types) + [bytes]))
    assert logs.split('\n')[0].find("Step ") != -1
@pytest.mark.skipif(sys.version_info[0] >= 3,
                    reason="known not to work on Python 3 (#74)")
def test_parse_headers():
    """Raw HTTP headers from a fixture must parse into Response.headers."""
    rm = ResponseMapping("0.4.1")
    file_name = DEFINITION["/oauth/authorize"]["get"]["file"]
    raw_headers = rm.get_response_content(file_name)
    r = Response(raw_headers=raw_headers)
    assert r.headers is not None
    assert len(r.headers.items()) > 0
    assert r.headers["location"]
def test_build_id_param_shorten_id():
    # 63 characters is within the allowed limit and must survive intact.
    p = BuildIDParam()
    p.value = "x" * 63
    val = p.value
    assert len(val) == 63
def test_build_id_param_raise_exc():
    # Characters illegal in a build ID must be rejected.
    p = BuildIDParam()
    with pytest.raises(OsbsValidationException):
        p.value = r"\\\\@@@@||||"
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.cache()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from os import path
import shutil
import tempfile
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class FileCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for file-backed `tf.data.Dataset.cache(filename)`."""

  def setUp(self):
    super(FileCacheTest, self).setUp()
    # Fresh temporary directory per test; cache files share this prefix.
    self.tmp_dir = tempfile.mkdtemp()
    self.cache_prefix = path.join(self.tmp_dir, "cache")

  def tearDown(self):
    if self.tmp_dir:
      shutil.rmtree(self.tmp_dir, ignore_errors=True)
    super(FileCacheTest, self).tearDown()

  @combinations.generate(test_base.default_test_combinations())
  def testCacheDatasetPassthrough(self):
    """Caching must not change the elements a dataset produces."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))

    def dataset_fn(count=5, filename=None):
      # Optionally wrap the repeated dataset in a file-backed cache.
      repeat_dataset = (
          dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
      if filename:
        return repeat_dataset.cache(filename)
      else:
        return repeat_dataset

    self.assertEqual(
        tuple([c.shape[1:] for c in components]),
        dataset_ops.get_legacy_output_shapes(dataset_fn()))
    get_next = self.getNext(dataset_fn())
    # First run without caching to collect the "ground truth".
    elements = []
    for _ in range(20):
      elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    # Assert that the cached dataset has the same elements as the
    # "ground truth".
    get_next = self.getNext(dataset_fn(filename=self.cache_prefix))
    cached_elements = []
    for _ in range(20):
      cached_elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertAllEqual(elements, cached_elements)
    # Re-initialize with an empty upstream (to throw errors.OutOfRangeError
    # if we didn't use the cache).
    get_next = self.getNext(dataset_fn(count=0, filename=self.cache_prefix))
    replayed_elements = []
    for _ in range(20):
      replayed_elements.append(self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())
    self.assertEqual(cached_elements, replayed_elements)
    # Re-initialize with an empty upstream and a missing cache file (should
    # throw errors.OutOfRangeError immediately).
    get_next = self.getNext(
        dataset_fn(count=0, filename=self.cache_prefix + "nonsense"))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  @combinations.generate(test_base.default_test_combinations())
  def testConcurrentWriters(self):
    """A second writer to the same cache file must fail with AlreadyExists."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))
    cache_dataset1 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    cache_dataset2 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    get_next1 = self.getNext(cache_dataset1)
    get_next2 = self.getNext(cache_dataset2)
    self.evaluate(get_next1())  # this should succeed
    with self.assertRaises(errors.AlreadyExistsError):
      self.evaluate(get_next2())
    self.evaluate(get_next1())  # this should continue to succeed

  @combinations.generate(test_base.default_test_combinations())
  def testConcurrentReaders(self):
    """A completed cache file may be read by several iterators at once."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))
    cache_dataset1 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    cache_dataset2 = (
        dataset_ops.Dataset.from_tensor_slices(components).cache(
            self.cache_prefix))
    get_next1 = self.getNext(cache_dataset1)
    get_next2 = self.getNext(cache_dataset2)
    # Fully consume iterator 1 first so the cache file gets written.
    elements = []
    for _ in range(4):
      elements.append(self.evaluate(get_next1()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())
    # Re-initialize
    get_next1 = self.getNext(cache_dataset1, requires_initialization=True)
    get_next2 = self.getNext(cache_dataset2, requires_initialization=True)
    # Reading concurrently should succeed.
    elements_itr1 = []
    elements_itr2 = []
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    # Intentionally reversing the order
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    elements_itr1.append(self.evaluate(get_next1()))
    elements_itr2.append(self.evaluate(get_next2()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next2())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())
    self.assertAllEqual(elements, elements_itr1)
    self.assertAllEqual(elements, elements_itr2)

  @combinations.generate(test_base.default_test_combinations())
  def testReadingPastEndOfSequence(self):
    """Repeating past a completed cache must replay the cached data."""
    dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
    dataset = dataset.map(lambda a: a).batch(4).repeat(2)
    expected_output = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]] * 2
    self.assertDatasetProduces(dataset, expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testCleaningUpCacheFiles(self):
    """Abandoning an iterator mid-write must not break later runs."""

    def do_test(i):
      # Consume up to `i` elements, possibly abandoning the cache mid-write.
      dataset = dataset_ops.Dataset.range(10).cache(self.cache_prefix)
      get_next = self.getNext(dataset)
      for _ in range(i):
        try:
          self.evaluate(get_next())
        except errors.OutOfRangeError:
          break

    if not context.executing_eagerly():
      self.skipTest(
          "Test requires eager mode for iterators to be deconstructed")

    for i in [0, 3, 10, 12, 15]:
      do_test(i)
class MemoryCacheTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Tests for in-memory `tf.data.Dataset.cache()` (no filename)."""

  @combinations.generate(test_base.default_test_combinations())
  def testCacheDatasetPassthrough(self):
    """A cached dataset replays its first epoch even if upstream changes."""
    with ops.device("cpu:0"):
      repeat_count = variables.Variable(constant_op.constant(10, dtypes.int64))
      dataset = dataset_ops.Dataset.range(3).flat_map(
          lambda x: dataset_ops.Dataset.from_tensors(x).repeat(repeat_count))
      cached_dataset = dataset.cache().repeat(2)
      uncached_dataset = dataset.repeat(2)
      self.evaluate(repeat_count.initializer)
      # Needs to be initializable to capture the variable.
      cached_next = self.getNext(cached_dataset, requires_initialization=True)
      uncached_next = self.getNext(
          uncached_dataset, requires_initialization=True)
      for i in range(3):
        for _ in range(10):
          self.assertEqual(self.evaluate(cached_next()), i)
          self.assertEqual(self.evaluate(uncached_next()), i)
      # Empty the upstream dataset for the second epoch.
      self.evaluate(repeat_count.assign(0))
      # The uncached iterator should now be empty.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(uncached_next())
      # The cached iterator replays from cache.
      for i in range(3):
        for _ in range(10):
          self.assertEqual(self.evaluate(cached_next()), i)
      # The cached iterator should now be empty.
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(cached_next())

  @combinations.generate(test_base.default_test_combinations())
  def testEmptyCacheReading(self):
    """Caching an empty dataset must produce an empty dataset."""
    components = (np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
                  np.array([9.0, 10.0, 11.0, 12.0]))
    repeat_dataset = (
        dataset_ops.Dataset.from_tensor_slices(components).repeat(0))
    cache_dataset = repeat_dataset.cache()
    self.assertDatasetProduces(cache_dataset, expected_output=[])

  @combinations.generate(test_base.default_test_combinations())
  def testConcurrentReaders(self):
    """Interleaved iterators over separate memory caches stay independent."""
    dataset_fn = lambda: dataset_ops.Dataset.range(5).cache()
    d1 = dataset_fn().map(lambda x: x + 1)
    d2 = dataset_fn().map(lambda x: x + 6)
    get_next1 = self.getNext(d1)
    self.assertEqual(1, self.evaluate(get_next1()))
    self.assertEqual(2, self.evaluate(get_next1()))
    self.assertEqual(3, self.evaluate(get_next1()))
    get_next2 = self.getNext(d2)
    self.assertEqual(6, self.evaluate(get_next2()))
    self.assertEqual(7, self.evaluate(get_next2()))
    self.assertEqual(4, self.evaluate(get_next1()))  # interleave execution
    self.assertEqual([8, 5],
                     [self.evaluate(get_next2()),
                      self.evaluate(get_next1())])
    self.assertEqual(9, self.evaluate(get_next2()))
    self.assertEqual(10, self.evaluate(get_next2()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next2())
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next1())

  @combinations.generate(test_base.default_test_combinations())
  def testCacheTakeRepeat(self):
    """cache() composed with take() and repeat() yields the truncated data."""
    dataset = dataset_ops.Dataset.range(10).cache().take(5).repeat(2)
    expected_output = [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
    self.assertDatasetProduces(dataset, expected_output=expected_output)

  @combinations.generate(test_base.default_test_combinations())
  def testCacheRepeatEpochs(self):
    """The map function must run only during the first (caching) epoch."""
    counter = variables.Variable(0)
    self.evaluate(counter.initializer)

    def increment_fn(x):
      counter.assign_add(1)
      return x

    dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache().repeat(2)
    get_next = self.getNext(dataset, requires_initialization=True)
    # first epoch
    for i in range(10):
      self.assertEqual(i, self.evaluate(counter))
      self.assertEqual(i, self.evaluate(get_next()))
    # second epoch: counter stays at 10, so increment_fn did not run again.
    for i in range(10):
      self.assertEqual(10, self.evaluate(counter))
      self.assertEqual(i, self.evaluate(get_next()))
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(get_next())

  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheIterationEpochs(self):
    """Same check as above, but iterating the dataset directly in eager."""
    counter = variables.Variable(0)
    self.evaluate(counter.initializer)

    def increment_fn(x):
      counter.assign_add(1)
      return x

    dataset = dataset_ops.Dataset.range(10).map(increment_fn).cache()
    # first epoch
    i = 0
    for elem in dataset:
      self.assertEqual(i, self.evaluate(elem))
      i += 1
    self.assertEqual(i, self.evaluate(counter))
    # second epoch: served from cache, so the counter stays at 10.
    i = 0
    for elem in dataset:
      self.assertEqual(10, self.evaluate(counter))
      self.assertEqual(i, self.evaluate(elem))
      i += 1

  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheV2ResourceCapture(self):
    """A cached dataset captured via interleave must remain usable."""

    def make_dataset():
      ids = dataset_ops.Dataset.range(10)
      ids = ids.cache()

      def interleave_fn(dataset, _):
        return dataset

      dataset = dataset_ops.Dataset.range(1)
      dataset = dataset.interleave(functools.partial(interleave_fn, ids))
      return dataset

    results = []
    for elem in make_dataset():
      results.append(elem.numpy())
    self.assertAllEqual(results, range(10))

  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheV2ConcurrentIterators(self):
    """Two concurrent iterators over one cached dataset see the same data."""
    dataset = dataset_ops.Dataset.range(10).cache()
    it1 = iter(dataset)
    it2 = iter(dataset)
    for i in range(10):
      self.assertEqual(next(it1), i)
      self.assertEqual(next(it2), i)

  @combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
  def testCacheKnownCardinality(self):
    # Check that a dataset which produces random permutation of range(10) ends
    # up being cached when we read all of its element but do not reach EOF.
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.shuffle(10, reshuffle_each_iteration=True).cache()
    it = iter(dataset)
    results = []
    for _ in range(10):
      results.append(next(it))
    # A second iterator must replay the exact same (cached) permutation.
    it = iter(dataset)
    for i in range(10):
      self.assertEqual(next(it), results[i])
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class aaauser_auditnslogpolicy_binding(base_resource) :
	""" Binding class showing the auditnslogpolicy that can be bound to aaauser.
	"""
	def __init__(self) :
		# Bound policy name, its priority, action type and the owning user.
		self._policy = ""
		self._priority = 0
		self._acttype = 0
		self._username = ""
		# Binding count; populated at runtime by the payload formatter
		# (see count()/count_filtered() below).
		self.___count = 0
	@property
	def priority(self) :
		ur"""The priority of the policy.
		"""
		try :
			return self._priority
		except Exception as e:
			raise e
	@priority.setter
	def priority(self, priority) :
		ur"""The priority of the policy.
		"""
		try :
			self._priority = priority
		except Exception as e:
			raise e
	@property
	def policy(self) :
		ur"""The policy Name.
		"""
		try :
			return self._policy
		except Exception as e:
			raise e
	@policy.setter
	def policy(self, policy) :
		ur"""The policy Name.
		"""
		try :
			self._policy = policy
		except Exception as e:
			raise e
	@property
	def username(self) :
		ur"""User account to which to bind the policy.<br/>Minimum length = 1.
		"""
		try :
			return self._username
		except Exception as e:
			raise e
	@username.setter
	def username(self, username) :
		ur"""User account to which to bind the policy.<br/>Minimum length = 1
		"""
		try :
			self._username = username
		except Exception as e:
			raise e
	@property
	def acttype(self) :
		# Read-only: no setter is defined for acttype in this binding.
		try :
			return self._acttype
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(aaauser_auditnslogpolicy_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 clears the session so the next request
				# re-authenticates; other errors raise nitro_exception.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.aaauser_auditnslogpolicy_binding
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			if self.username is not None :
				return str(self.username)
			return None
		except Exception as e :
			raise e
	@classmethod
	def add(cls, client, resource) :
		# Bind a single resource, or a list of resources, on the appliance.
		try :
			if resource and type(resource) is not list :
				updateresource = aaauser_auditnslogpolicy_binding()
				updateresource.username = resource.username
				updateresource.policy = resource.policy
				updateresource.priority = resource.priority
				return updateresource.update_resource(client)
			else :
				if resource and len(resource) > 0 :
					updateresources = [aaauser_auditnslogpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].username = resource[i].username
						updateresources[i].policy = resource[i].policy
						updateresources[i].priority = resource[i].priority
					return cls.update_bulk_request(client, updateresources)
		except Exception as e :
			raise e
	@classmethod
	def delete(cls, client, resource) :
		# Unbind a single resource, or a list of resources.
		try :
			if resource and type(resource) is not list :
				deleteresource = aaauser_auditnslogpolicy_binding()
				deleteresource.username = resource.username
				deleteresource.policy = resource.policy
				return deleteresource.delete_resource(client)
			else :
				if resource and len(resource) > 0 :
					deleteresources = [aaauser_auditnslogpolicy_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i].username = resource[i].username
						deleteresources[i].policy = resource[i].policy
					return cls.delete_bulk_request(client, deleteresources)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, service, username) :
		ur""" Use this API to fetch aaauser_auditnslogpolicy_binding resources.
		"""
		try :
			obj = aaauser_auditnslogpolicy_binding()
			obj.username = username
			response = obj.get_resources(service)
			return response
		except Exception as e:
			raise e
	@classmethod
	def get_filtered(cls, service, username, filter_) :
		ur""" Use this API to fetch filtered set of aaauser_auditnslogpolicy_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = aaauser_auditnslogpolicy_binding()
			obj.username = username
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			return response
		except Exception as e:
			raise e
	@classmethod
	def count(cls, service, username) :
		ur""" Use this API to count aaauser_auditnslogpolicy_binding resources configued on NetScaler.
		"""
		try :
			obj = aaauser_auditnslogpolicy_binding()
			obj.username = username
			option_ = options()
			option_.count = True
			response = obj.get_resources(service, option_)
			if response :
				# NOTE(review): '___count' is read through __dict__ because a
				# plain attribute access inside this class body would be
				# name-mangled at compile time; the payload formatter
				# presumably sets the attribute under this literal name at
				# runtime — verify against the formatter.
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	@classmethod
	def count_filtered(cls, service, username, filter_) :
		ur""" Use this API to count the filtered set of aaauser_auditnslogpolicy_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = aaauser_auditnslogpolicy_binding()
			obj.username = username
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
class aaauser_auditnslogpolicy_binding_response(base_response) :
	""" NITRO response wrapper for aaauser_auditnslogpolicy_binding.

	Pre-populates the binding list with `length` empty binding objects so
	the payload formatter can fill them in from the wire response.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Fix: removed the dead store of an empty list that was immediately
		# overwritten by this comprehension in the original.
		self.aaauser_auditnslogpolicy_binding = [aaauser_auditnslogpolicy_binding() for _ in range(length)]
| |
"""
The Cobweb module contains the :class:`CobwebTree` and :class:`CobwebNode`
classes which are used to achieve the basic Cobweb functionality.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from random import shuffle
from random import random
from math import log
from concept_formation.utils import weighted_choice
from concept_formation.utils import most_likely_choice
class CobwebTree(object):
"""
The CobwebTree contains the knoweldge base of a partiucluar instance of the
cobweb algorithm and can be used to fit and categorize instances.
"""
    def __init__(self):
        """
        The tree constructor.

        Creates an empty root concept node and gives it a back-reference
        to this tree.
        """
        self.root = CobwebNode()
        self.root.tree = self
    def clear(self):
        """
        Clears the concepts of the tree.

        Replaces the root with a fresh empty node, discarding all
        previously learned concepts.
        """
        self.root = CobwebNode()
        self.root.tree = self
    def __str__(self):
        """Return the string representation of the root concept (and thus
        the whole tree)."""
        return str(self.root)
    def _sanity_check_instance(self, instance):
        """Validate that an instance is usable by this tree.

        Every attribute must be hashable and subscriptable (e.g. a string)
        and every value must be hashable and not None; otherwise a
        ValueError describing the offending attribute/value is raised.
        """
        for attr in instance:
            try:
                # Attributes must be hashable (dict keys) and subscriptable.
                hash(attr)
                attr[0]
            except Exception:
                raise ValueError('Invalid attribute: '+str(attr) +
                                 ' of type: '+str(type(attr)) +
                                 ' in instance: '+str(instance) +
                                 ',\n'+type(self).__name__ +
                                 ' only works with hashable ' +
                                 'and subscriptable attributes' +
                                 ' (e.g., strings).')
            try:
                hash(instance[attr])
            except Exception:
                raise ValueError('Invalid value: '+str(instance[attr]) +
                                 ' of type: '+str(type(instance[attr])) +
                                 ' in instance: '+str(instance) +
                                 ',\n'+type(self).__name__ +
                                 ' only works with hashable values.')
            if instance[attr] is None:
                raise ValueError("Attributes with value None should"
                                 " be manually removed.")
    def ifit(self, instance):
        """
        Incrementally fit a new instance into the tree and return its resulting
        concept.
        The instance is passed down the cobweb tree and updates each node to
        incorporate the instance. **This process modifies the tree's
        knowledge** for a non-modifying version of labeling use the
        :meth:`CobwebTree.categorize` function.
        :param instance: An instance to be categorized into the tree.
        :type instance: :ref:`Instance<instance-rep>`
        :return: A concept describing the instance
        :rtype: CobwebNode
        .. seealso:: :meth:`CobwebTree.cobweb`
        """
        # Validate the instance before mutating the tree.
        self._sanity_check_instance(instance)
        return self.cobweb(instance)
    def fit(self, instances, iterations=1, randomize_first=True):
        """
        Fit a collection of instances into the tree.
        This is a batch version of the ifit function that takes a collection of
        instances and categorizes all of them. The instances can be
        incorporated multiple times to burn in the tree with prior knowledge.
        Each iteration of fitting uses a randomized order but the first pass
        can be done in the original order of the list if desired, this is
        useful for initializing the tree with specific prior experience.
        :param instances: a collection of instances
        :type instances:  [:ref:`Instance<instance-rep>`,
            :ref:`Instance<instance-rep>`, ...]
        :param iterations: number of times the list of instances should be fit.
        :type iterations: int
        :param randomize_first: whether or not the first iteration of fitting
            should be done in a random order or in the list's original order.
        :type randomize_first: bool
        """
        # Copy so shuffling never mutates the caller's list.
        instances = [i for i in instances]
        for x in range(iterations):
            # Shuffle before the first pass only when requested; later
            # passes always see the order shuffled at the end of the
            # previous iteration.
            if x == 0 and randomize_first:
                shuffle(instances)
            for i in instances:
                self.ifit(i)
            shuffle(instances)
    def cobweb(self, instance):
        """
        The core cobweb algorithm used in fitting and categorization.

        In the general case, the cobweb algorithm entertains a number of
        sorting operations for the instance and then commits to the operation
        that maximizes the :meth:`category utility
        <CobwebNode.category_utility>` of the tree at the current node and
        then recurses.

        At each node the algorithm first calculates the category utility of
        inserting the instance at each of the node's children, keeping the
        best two (see: :meth:`CobwebNode.two_best_children
        <CobwebNode.two_best_children>`), and then calculates the
        category_utility of performing other operations using the best two
        children (see: :meth:`CobwebNode.get_best_operation
        <CobwebNode.get_best_operation>`), committing to whichever operation
        results in the highest category utility. In the case of ties an
        operation is chosen at random.

        In the base case, i.e. a leaf node, the algorithm checks to see if
        the current leaf is an exact match to the current node. If it is, then
        the instance is inserted and the leaf is returned. Otherwise, a new
        leaf is created.

        .. note:: This function is equivalent to calling
            :meth:`CobwebTree.ifit` but its better to call ifit because it is
            the polymorphic method signature between the different cobweb
            family algorithms.

        :param instance: an instance to incorporate into the tree
        :type instance: :ref:`Instance<instance-rep>`
        :return: a concept describing the instance
        :rtype: CobwebNode

        .. seealso:: :meth:`CobwebTree.ifit`, :meth:`CobwebTree.categorize`
        """
        current = self.root
        while current:
            # Base case: a leaf that exactly matches the instance absorbs it.
            # the current.count == 0 here is for the initially empty tree.
            if not current.children and (current.is_exact_match(instance) or
                                         current.count == 0):
                # print("leaf match")
                current.increment_counts(instance)
                break
            elif not current.children:
                # Fringe split: the leaf does not match, so push it down
                # under a fresh copy of itself and add the instance as a new
                # sibling leaf. The copy takes the leaf's place in its parent
                # (or becomes the root).
                # print("fringe split")
                new = current.__class__(current)
                current.parent = new
                new.children.append(current)
                if new.parent:
                    new.parent.children.remove(current)
                    new.parent.children.append(new)
                else:
                    self.root = new
                new.increment_counts(instance)
                current = new.create_new_child(instance)
                break
            else:
                # Interior node: evaluate the four standard Cobweb operations
                # using the two best children, then commit to the winner.
                best1_cu, best1, best2 = current.two_best_children(instance)
                _, best_action = current.get_best_operation(instance, best1,
                                                            best2, best1_cu)
                # print(best_action)
                if best_action == 'best':
                    current.increment_counts(instance)
                    current = best1
                elif best_action == 'new':
                    current.increment_counts(instance)
                    current = current.create_new_child(instance)
                    break
                elif best_action == 'merge':
                    current.increment_counts(instance)
                    new_child = current.merge(best1, best2)
                    current = new_child
                elif best_action == 'split':
                    # Note: split does not consume the instance; the loop
                    # re-evaluates the same node with promoted grandchildren.
                    current.split(best1)
                else:
                    raise Exception('Best action choice "' + best_action +
                                    '" not a recognized option. This should be'
                                    ' impossible...')
        return current
def _cobweb_categorize(self, instance):
"""
A cobweb specific version of categorize, not intended to be
externally called.
.. seealso:: :meth:`CobwebTree.categorize`
"""
current = self.root
while current:
if not current.children:
return current
_, best1, best2 = current.two_best_children(instance)
current = best1
def infer_missing(self, instance, choice_fn="most likely",
allow_none=True):
"""
Given a tree and an instance, returns a new instance with attribute
values picked using the specified choice function (either "most likely"
or "sampled").
.. todo:: write some kind of test for this.
:param instance: an instance to be completed.
:type instance: :ref:`Instance<instance-rep>`
:param choice_fn: a string specifying the choice function to use,
either "most likely" or "sampled".
:type choice_fn: a string
:param allow_none: whether attributes not in the instance can be
inferred to be missing. If False, then all attributes will be
inferred with some value.
:type allow_none: Boolean
:return: A completed instance
:rtype: :ref:`Instance<instance-rep>`
"""
self._sanity_check_instance(instance)
temp_instance = {a: instance[a] for a in instance}
concept = self._cobweb_categorize(temp_instance)
for attr in concept.attrs('all'):
if attr in temp_instance:
continue
val = concept.predict(attr, choice_fn, allow_none)
if val is not None:
temp_instance[attr] = val
return temp_instance
def categorize(self, instance):
"""
Sort an instance in the categorization tree and return its resulting
concept.
The instance is passed down the categorization tree according to the
normal cobweb algorithm except using only the best operator and without
modifying nodes' probability tables. **This process does not modify the
tree's knowledge** for a modifying version of labeling use the
:meth:`CobwebTree.ifit` function
:param instance: an instance to be categorized into the tree.
:type instance: :ref:`Instance<instance-rep>`
:return: A concept describing the instance
:rtype: CobwebNode
.. seealso:: :meth:`CobwebTree.cobweb`
"""
self._sanity_check_instance(instance)
return self._cobweb_categorize(instance)
class CobwebNode(object):
"""
A CobwebNode represents a concept within the knoweldge base of a particular
:class:`CobwebTree`. Each node contains a probability table that can be
used to calculate the probability of different attributes given the concept
that the node represents.
In general the :meth:`CobwebTree.ifit`, :meth:`CobwebTree.categorize`
functions should be used to initially interface with the Cobweb knowledge
base and then the returned concept can be used to calculate probabilities
of certain attributes or determine concept labels.
This constructor creates a CobwebNode with default values. It can also be
used as a copy constructor to "deepcopy" a node, including all references
to other parts of the original node's CobwebTree.
:param otherNode: Another concept node to deepcopy.
:type otherNode: CobwebNode
"""
# a counter used to generate unique concept names.
_counter = 0
def __init__(self, otherNode=None):
"""Create a new CobwebNode"""
self.concept_id = self.gensym()
self.count = 0.0
self.av_counts = {}
self.children = []
self.parent = None
self.tree = None
if otherNode:
self.tree = otherNode.tree
self.parent = otherNode.parent
self.update_counts_from_node(otherNode)
for child in otherNode.children:
self.children.append(self.__class__(child))
def shallow_copy(self):
"""
Create a shallow copy of the current node (and not its children)
This can be used to copy only the information relevant to the node's
probability table without maintaining reference to other elements of
the tree, except for the root which is necessary to calculate category
utility.
"""
temp = self.__class__()
temp.tree = self.tree
temp.parent = self.parent
temp.update_counts_from_node(self)
return temp
def attrs(self, attr_filter=None):
"""
Iterates over the attributes present in the node's attribute-value
table with the option to filter certain types. By default the filter
will ignore hidden attributes and yield all others. If the string 'all'
is provided then all attributes will be yielded. In neither of those
cases the filter will be interpreted as a function that returns true if
an attribute should be yielded and false otherwise.
"""
if attr_filter is None:
return filter(lambda x: x[0] != "_", self.av_counts)
elif attr_filter == 'all':
return self.av_counts
else:
return filter(attr_filter, self.av_counts)
def increment_counts(self, instance):
"""
Increment the counts at the current node according to the specified
instance.
:param instance: A new instances to incorporate into the node.
:type instance: :ref:`Instance<instance-rep>`
"""
self.count += 1
for attr in instance:
if attr not in self.av_counts:
self.av_counts[attr] = {}
if instance[attr] not in self.av_counts[attr]:
self.av_counts[attr][instance[attr]] = 0
self.av_counts[attr][instance[attr]] += 1
def update_counts_from_node(self, node):
"""
Increments the counts of the current node by the amount in the
specified node.
This function is used as part of copying nodes and in merging nodes.
:param node: Another node from the same CobwebTree
:type node: CobwebNode
"""
self.count += node.count
for attr in node.attrs('all'):
if attr not in self.av_counts:
self.av_counts[attr] = {}
for val in node.av_counts[attr]:
if val not in self.av_counts[attr]:
self.av_counts[attr][val] = 0
self.av_counts[attr][val] += node.av_counts[attr][val]
def expected_correct_guesses(self):
"""
Returns the number of correct guesses that are expected from the given
concept.
This is the sum of the probability of each attribute value squared.
This function is used in calculating category utility.
:return: the number of correct guesses that are expected from the given
concept.
:rtype: float
"""
correct_guesses = 0.0
attr_count = 0
for attr in self.attrs():
attr_count += 1
if attr in self.av_counts:
for val in self.av_counts[attr]:
prob = (self.av_counts[attr][val]) / self.count
correct_guesses += (prob * prob)
return correct_guesses / attr_count
def category_utility(self):
"""
Return the category utility of a particular division of a concept into
its children.
Category utility is always calculated in reference to a parent node and
its own children. This is used as the heuristic to guide the concept
formation process. Category Utility is calculated as:
.. math::
CU(\\{C_1, C_2, \\cdots, C_n\\}) = \\frac{1}{n} \\sum_{k=1}^n
P(C_k) \\left[ \\sum_i \\sum_j P(A_i = V_{ij} | C_k)^2 \\right] -
\\sum_i \\sum_j P(A_i = V_{ij})^2
where :math:`n` is the numer of children concepts to the current node,
:math:`P(C_k)` is the probability of a concept given the current node,
:math:`P(A_i = V_{ij} | C_k)` is the probability of a particular
attribute value given the concept :math:`C_k`, and :math:`P(A_i =
V_{ij})` is the probability of a particular attribute value given the
current node.
In general this is used as an internal function of the cobweb algorithm
but there may be times when it would be useful to call outside of the
algorithm itself.
:return: The category utility of the current node with respect to its
children.
:rtype: float
"""
if len(self.children) == 0:
return 0.0
child_correct_guesses = 0.0
for child in self.children:
p_of_child = child.count / self.count
child_correct_guesses += (p_of_child *
child.expected_correct_guesses())
return ((child_correct_guesses - self.expected_correct_guesses()) /
len(self.children))
def get_best_operation(self, instance, best1, best2, best1_cu,
possible_ops=["best", "new", "merge", "split"]):
"""
Given an instance, the two best children based on category utility and
a set of possible operations, find the operation that produces the
highest category utility, and then return the category utility and name
for the best operation. In the case of ties, an operator is randomly
chosen.
Given the following starting tree the results of the 4 standard Cobweb
operations are shown below:
.. image:: images/Original.png
:width: 200px
:align: center
* **Best** - Categorize the instance to child with the best category
utility. This results in a recurisve call to :meth:`cobweb
<concept_formation.cobweb.CobwebTree.cobweb>`.
.. image:: images/Best.png
:width: 200px
:align: center
* **New** - Create a new child node to the current node and add the
instance there. See: :meth:`create_new_child
<concept_formation.cobweb.CobwebNode.create_new_child>`.
.. image:: images/New.png
:width: 200px
:align: center
* **Merge** - Take the two best children, create a new node as their
mutual parent and add the instance there. See: :meth:`merge
<concept_formation.cobweb.CobwebNode.merge>`.
.. image:: images/Merge.png
:width: 200px
:align: center
* **Split** - Take the best node and promote its children to be
children of the current node and recurse on the current node. See:
:meth:`split <concept_formation.cobweb.CobwebNode.split>`
.. image:: images/Split.png
:width: 200px
:align: center
Each operation is entertained and the resultant category utility is
used to pick which operation to perform. The list of operations to
entertain can be controlled with the possible_ops parameter. For
example, when performing categorization without modifying knoweldge
only the best and new operators are used.
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:param best1: A tuple containing the relative cu of the best child and
the child itself, as determined by
:meth:`CobwebNode.two_best_children`.
:type best1: (float, CobwebNode)
:param best2: A tuple containing the relative cu of the second best
child and the child itself, as determined by
:meth:`CobwebNode.two_best_children`.
:type best2: (float, CobwebNode)
:param possible_ops: A list of operations from ["best", "new", "merge",
"split"] to entertain.
:type possible_ops: ["best", "new", "merge", "split"]
:return: A tuple of the category utility of the best operation and the
name of the best operation.
:rtype: (cu_bestOp, name_bestOp)
"""
if not best1:
raise ValueError("Need at least one best child.")
operations = []
if "best" in possible_ops:
operations.append((best1_cu, random(), "best"))
if "new" in possible_ops:
operations.append((self.cu_for_new_child(instance), random(),
'new'))
if "merge" in possible_ops and len(self.children) > 2 and best2:
operations.append((self.cu_for_merge(best1, best2, instance),
random(), 'merge'))
if "split" in possible_ops and len(best1.children) > 0:
operations.append((self.cu_for_split(best1), random(), 'split'))
operations.sort(reverse=True)
# print(operations)
best_op = (operations[0][0], operations[0][2])
# print(best_op)
return best_op
def two_best_children(self, instance):
"""
Calculates the category utility of inserting the instance into each of
this node's children and returns the best two. In the event of ties
children are sorted first by category utility, then by their size, then
by a random value.
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: the category utility and indices for the two best children
(the second tuple will be ``None`` if there is only 1 child).
:rtype: ((cu_best1,index_best1),(cu_best2,index_best2))
"""
if len(self.children) == 0:
raise Exception("No children!")
children_relative_cu = [(self.relative_cu_for_insert(child, instance),
child.count, random(), child) for child in
self.children]
children_relative_cu.sort(reverse=True)
# Convert the relative CU's of the two best children into CU scores
# that can be compared with the other operations.
const = self.compute_relative_CU_const(instance)
best1 = children_relative_cu[0][3]
best1_relative_cu = children_relative_cu[0][0]
best1_cu = (best1_relative_cu / (self.count+1) / len(self.children)
+ const)
best2 = None
if len(children_relative_cu) > 1:
best2 = children_relative_cu[1][3]
return best1_cu, best1, best2
def compute_relative_CU_const(self, instance):
"""
Computes the constant value that is used to convert between CU and
relative CU scores. The constant value is basically the category
utility that results from adding the instance to the root, but none of
the children. It can be computed directly as:
.. math::
const = \\frac{1}{n} \\sum_{k=1}^{n} \\left[
\\frac{C_k.count}{count + 1} \\sum_i \\sum_j P(A_i = V_{ij} |
C)^2 \\right] - \\sum_i \\sum_j P(A_i = V_{ij} | UpdatedRoot)^2
where :math:`n` is the number of children of the root, :math:`C_k` is
child :math:`k`, :math:`C_k.count` is the number of instances stored
in child :math:`C_k`, :math:`count` is the number of instances stored
in the root. Finally, :math:`UpdatedRoot` is a copy of the root that
has been updated with the counts of the instance.
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: The value of the constant used to relativize the CU.
:rtype: float
"""
temp = self.shallow_copy()
temp.increment_counts(instance)
ec_root_u = temp.expected_correct_guesses()
const = 0
for c in self.children:
const += ((c.count / (self.count + 1)) *
c.expected_correct_guesses())
const -= ec_root_u
const /= len(self.children)
return const
def relative_cu_for_insert(self, child, instance):
"""
Computes a relative CU score for each insert operation. The relative CU
score is more efficient to calculate for each insert operation and is
guranteed to have the same rank ordering as the CU score so it can be
used to determine which insert operation is best. The relative CU can
be computed from the CU using the following transformation.
.. math::
relative\\_cu(cu) = (cu - const) * n * (count + 1)
where :math:`const` is the one returned by
:meth:`CobwebNode.compute_relative_CU_const`, :math:`n` is the number
of children of the current node, and :math:`count` is the number of
instances stored in the current node (the root).
The particular :math:`const` value was chosen to make the calculation
of the relative cu scores for each insert operation efficient. When
computing the CU for inserting the instance into a particular child,
the terms in the formula above can be expanded and many of the
intermediate calculations cancel out. After these cancelations,
computing the relative CU for inserting into a particular child
:math:`C_i` reduces to:
.. math::
relative\\_cu\\_for\\_insert(C_i) = (C_i.count + 1) * \\sum_i
\\sum_j P(A_i = V_{ij}| UpdatedC_i)^2 - (C_i.count) * \\sum_i
\\sum_j P(A_i = V_{ij}| C_i)^2
where :math:`UpdatedC_i` is a copy of :math:`C_i` that has been updated
with the counts from the given instance.
By computing relative_CU scores instead of CU scores for each insert
operation, the time complexity of the underlying Cobweb algorithm is
reduced from :math:`O(B^2 \\times log_B(n) \\times AV)` to
:math:`O(B \\times log_B(n) \\times AV)` where :math:`B` is the average
branching factor of the tree, :math:`n` is the number of instances
being categorized, :math:`A` is the average number of attributes per
instance, and :math:`V` is the average number of values per attribute.
:param child: a child of the current node
:type child: CobwebNode
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: the category utility of adding the instance to the given node
:rtype: float
"""
temp = child.shallow_copy()
temp.increment_counts(instance)
return ((child.count + 1) * temp.expected_correct_guesses() -
child.count * child.expected_correct_guesses())
def cu_for_insert(self, child, instance):
"""
Compute the category utility of adding the instance to the specified
child.
This operation does not actually insert the instance into the child it
only calculates what the result of the insertion would be. For the
actual insertion function see: :meth:`CobwebNode.increment_counts` This
is the function used to determine the best children for each of the
other operations.
:param child: a child of the current node
:type child: CobwebNode
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: the category utility of adding the instance to the given node
:rtype: float
.. seealso:: :meth:`CobwebNode.two_best_children` and
:meth:`CobwebNode.get_best_operation`
"""
temp = self.shallow_copy()
temp.increment_counts(instance)
for c in self.children:
temp_child = c.shallow_copy()
temp.children.append(temp_child)
temp_child.parent = temp
if c == child:
temp_child.increment_counts(instance)
return temp.category_utility()
def create_new_child(self, instance):
"""
Create a new child (to the current node) with the counts initialized by
the *given instance*.
This is the operation used for creating a new child to a node and
adding the instance to it.
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: The new child
:rtype: CobwebNode
"""
new_child = self.__class__()
new_child.parent = self
new_child.tree = self.tree
new_child.increment_counts(instance)
self.children.append(new_child)
return new_child
def create_child_with_current_counts(self):
"""
Create a new child (to the current node) with the counts initialized by
the *current node's counts*.
This operation is used in the speical case of a fringe split when a new
node is created at a leaf.
:return: The new child
:rtype: CobwebNode
"""
if self.count > 0:
new = self.__class__(self)
new.parent = self
new.tree = self.tree
self.children.append(new)
return new
def cu_for_new_child(self, instance):
"""
Return the category utility for creating a new child using the
particular instance.
This operation does not actually create the child it only calculates
what the result of creating it would be. For the actual new function
see: :meth:`CobwebNode.create_new_child`.
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: the category utility of adding the instance to a new child.
:rtype: float
.. seealso:: :meth:`CobwebNode.get_best_operation`
"""
temp = self.shallow_copy()
for c in self.children:
temp.children.append(c.shallow_copy())
# temp = self.shallow_copy()
temp.increment_counts(instance)
temp.create_new_child(instance)
return temp.category_utility()
def merge(self, best1, best2):
"""
Merge the two specified nodes.
A merge operation introduces a new node to be the merger of the the two
given nodes. This new node becomes a child of the current node and the
two given nodes become children of the new node.
:param best1: The child of the current node with the best category
utility
:type best1: CobwebNode
:param best2: The child of the current node with the second best
category utility
:type best2: CobwebNode
:return: The new child node that was created by the merge
:rtype: CobwebNode
"""
new_child = self.__class__()
new_child.parent = self
new_child.tree = self.tree
new_child.update_counts_from_node(best1)
new_child.update_counts_from_node(best2)
best1.parent = new_child
# best1.tree = new_child.tree
best2.parent = new_child
# best2.tree = new_child.tree
new_child.children.append(best1)
new_child.children.append(best2)
self.children.remove(best1)
self.children.remove(best2)
self.children.append(new_child)
return new_child
def cu_for_merge(self, best1, best2, instance):
"""
Return the category utility for merging the two best children.
This does not actually merge the two children it only calculates what
the result of the merge would be. For the actual merge operation see:
:meth:`CobwebNode.merge`
:param best1: The child of the current node with the best category
utility
:type best1: CobwebNode
:param best2: The child of the current node with the second best
category utility
:type best2: CobwebNode
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: The category utility that would result from merging best1 and
best2.
:rtype: float
.. seealso:: :meth:`CobwebNode.get_best_operation`
"""
temp = self.shallow_copy()
temp.increment_counts(instance)
new_child = self.__class__()
new_child.tree = self.tree
new_child.parent = temp
new_child.update_counts_from_node(best1)
new_child.update_counts_from_node(best2)
new_child.increment_counts(instance)
temp.children.append(new_child)
for c in self.children:
if c == best1 or c == best2:
continue
temp_child = c.shallow_copy()
temp.children.append(temp_child)
return temp.category_utility()
def split(self, best):
"""
Split the best node and promote its children
A split operation removes a child node and promotes its children to be
children of the current node. Split operations result in a recursive
call of cobweb on the current node so this function does not return
anything.
:param best: The child node to be split
:type best: CobwebNode
"""
self.children.remove(best)
for child in best.children:
child.parent = self
child.tree = self.tree
self.children.append(child)
def cu_for_fringe_split(self, instance):
"""
Return the category utility of performing a fringe split (i.e.,
adding a leaf to a leaf).
A "fringe split" is essentially a new operation performed at a leaf. It
is necessary to have the distinction because unlike a normal split a
fringe split must also push the parent down to maintain a proper tree
structure. This is useful for identifying unnecessary fringe splits,
when the two leaves are essentially identical. It can be used to keep
the tree from growing and to increase the tree's predictive accuracy.
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: the category utility of fringe splitting at the current node.
:rtype: float
.. seealso:: :meth:`CobwebNode.get_best_operation`
"""
temp = self.shallow_copy()
temp.create_child_with_current_counts()
temp.increment_counts(instance)
temp.create_new_child(instance)
return temp.category_utility()
def cu_for_split(self, best):
"""
Return the category utility for splitting the best child.
This does not actually split the child it only calculates what the
result of the split would be. For the actual split operation see:
:meth:`CobwebNode.split`. Unlike the category utility calculations for
the other operations split does not need the instance because splits
trigger a recursive call on the current node.
:param best: The child of the current node with the best category
utility
:type best: CobwebNode
:return: The category utility that would result from splitting best
:rtype: float
.. seealso:: :meth:`CobwebNode.get_best_operation`
"""
temp = self.shallow_copy()
for c in self.children + best.children:
if c == best:
continue
temp_child = c.shallow_copy()
temp.children.append(temp_child)
return temp.category_utility()
def is_exact_match(self, instance):
"""
Returns true if the concept exactly matches the instance.
:param instance: The instance currently being categorized
:type instance: :ref:`Instance<instance-rep>`
:return: whether the instance perfectly matches the concept
:rtype: boolean
.. seealso:: :meth:`CobwebNode.get_best_operation`
"""
for attr in set(instance).union(set(self.attrs())):
if attr[0] == '_':
continue
if attr in instance and attr not in self.av_counts:
return False
if attr in self.av_counts and attr not in instance:
return False
if attr in self.av_counts and attr in instance:
if instance[attr] not in self.av_counts[attr]:
return False
if not self.av_counts[attr][instance[attr]] == self.count:
return False
return True
def __hash__(self):
"""
The basic hash function. This hashes the concept name, which is
generated to be unique across concepts.
"""
return hash("CobwebNode" + str(self.concept_id))
def gensym(self):
"""
Generate a unique id and increment the class _counter.
This is used to create a unique name for every concept. As long as the
class _counter variable is never externally altered these keys will
remain unique.
"""
self.__class__._counter += 1
return self.__class__._counter
# return str(self.__class__._counter)
def __str__(self):
"""
Call :meth:`CobwebNode.pretty_print`
"""
return self.pretty_print()
def pretty_print(self, depth=0):
"""
Print the categorization tree
The string formatting inserts tab characters to align child nodes of
the same depth.
:param depth: The current depth in the print, intended to be called
recursively
:type depth: int
:return: a formated string displaying the tree and its children
:rtype: str
"""
ret = str(('\t' * depth) + "|-" + str(self.av_counts) + ":" +
str(self.count) + '\n')
for c in self.children:
ret += c.pretty_print(depth+1)
return ret
def depth(self):
"""
Returns the depth of the current node in its tree
:return: the depth of the current node in its tree
:rtype: int
"""
if self.parent:
return 1 + self.parent.depth()
return 0
def is_parent(self, other_concept):
"""
Return True if this concept is a parent of other_concept
:return: ``True`` if this concept is a parent of other_concept else
``False``
:rtype: bool
"""
temp = other_concept
while temp is not None:
if temp == self:
return True
try:
temp = temp.parent
except Exception:
print(temp)
assert False
return False
def num_concepts(self):
"""
Return the number of concepts contained below the current node in the
tree.
When called on the :attr:`CobwebTree.root` this is the number of nodes
in the whole tree.
:return: the number of concepts below this concept.
:rtype: int
"""
children_count = 0
for c in self.children:
children_count += c.num_concepts()
return 1 + children_count
def output_json(self):
"""
Outputs the categorization tree in JSON form
:return: an object that contains all of the structural information of
the node and its children
:rtype: obj
"""
output = {}
output['name'] = "Concept" + str(self.concept_id)
output['size'] = self.count
output['children'] = []
temp = {}
for attr in self.attrs('all'):
for value in self.av_counts[attr]:
temp[str(attr)] = {str(value): self.av_counts[attr][value] for
value in self.av_counts[attr]}
# temp[attr + " = " + str(value)] = self.av_counts[attr][value]
for child in self.children:
output["children"].append(child.output_json())
output['counts'] = temp
return output
def get_weighted_values(self, attr, allow_none=True):
"""
Return a list of weighted choices for an attribute based on the node's
probability table.
This calculation will include an option for the change that an
attribute is missing from an instance all together. This is useful for
probability and sampling calculations. If the attribute has never
appeared in the tree then it will return a 100% chance of None.
:param attr: an attribute of an instance
:type attr: :ref:`Attribute<attributes>`
:param allow_none: whether attributes in the nodes probability table
can be inferred to be missing. If False, then None will not be
cosidered as a possible value.
:type allow_none: Boolean
:return: a list of weighted choices for attr's value
:rtype: [(:ref:`Value<values>`, float), (:ref:`Value<values>`, float),
...]
"""
choices = []
if attr not in self.av_counts:
choices.append((None, 1.0))
return choices
val_count = 0
for val in self.av_counts[attr]:
count = self.av_counts[attr][val]
choices.append((val, count / self.count))
val_count += count
if allow_none:
choices.append((None, ((self.count - val_count) / self.count)))
return choices
def predict(self, attr, choice_fn="most likely", allow_none=True):
"""
Predict the value of an attribute, using the specified choice function
(either the "most likely" value or a "sampled" value).
:param attr: an attribute of an instance.
:type attr: :ref:`Attribute<attributes>`
:param choice_fn: a string specifying the choice function to use,
either "most likely" or "sampled".
:type choice_fn: a string
:param allow_none: whether attributes not in the instance can be
inferred to be missing. If False, then all attributes will be
inferred with some value.
:type allow_none: Boolean
:return: The most likely value for the given attribute in the node's
probability table.
:rtype: :ref:`Value<values>`
"""
if choice_fn == "most likely" or choice_fn == "m":
choose = most_likely_choice
elif choice_fn == "sampled" or choice_fn == "s":
choose = weighted_choice
else:
raise Exception("Unknown choice_fn")
if attr not in self.av_counts:
return None
choices = self.get_weighted_values(attr, allow_none)
val = choose(choices)
return val
def probability(self, attr, val):
"""
Returns the probability of a particular attribute value at the current
concept. This takes into account the possibilities that an attribute
can take any of the values available at the root, or be missing.
If you you want to check if the probability that an attribute is
missing, then check for the probability that the val is ``None``.
:param attr: an attribute of an instance
:type attr: :ref:`Attribute<attributes>`
:param val: a value for the given attribute or None
:type val: :ref:`Value<values>`
:return: The probability of attr having the value val in the current
concept.
:rtype: float
"""
if val is None:
c = 0.0
if attr in self.av_counts:
c = sum([self.av_counts[attr][v] for v in
self.av_counts[attr]])
return (self.count - c) / self.count
if attr in self.av_counts and val in self.av_counts[attr]:
return self.av_counts[attr][val] / self.count
return 0.0
def log_likelihood(self, child_leaf):
    """
    Return the log-likelihood of a leaf contained within the current
    concept.

    Note, if the leaf contains multiple instances, then it is treated as
    if it contained just a single instance (this function is just called
    multiple times for each instance in the leaf).

    :param child_leaf: a leaf node whose likelihood under this concept
        is being evaluated; must expose ``attrs()``, ``av_counts`` and
        ``probability()`` like this node does.
    :raises Exception: if the joint probability of an observed value is
        not strictly positive (the leaf has a value this concept assigns
        zero probability to).
    """
    ll = 0
    # Consider every attribute known to either node, and every value
    # (including None for "missing") either node has seen for it.
    for attr in set(self.attrs()).union(set(child_leaf.attrs())):
        vals = set([None])
        if attr in self.av_counts:
            vals.update(self.av_counts[attr])
        if attr in child_leaf.av_counts:
            vals.update(child_leaf.av_counts[attr])
        for val in vals:
            op = child_leaf.probability(attr, val)
            if op > 0:
                p = self.probability(attr, val) * op
                # BUG FIX: the guard was previously `p >= 0`, which let
                # p == 0 fall through to log(0) and raise a confusing
                # ValueError ("math domain error") instead of the
                # intended diagnostic below. log() requires p > 0.
                if p > 0:
                    ll += log(p)
                else:
                    raise Exception("Should always be greater than 0")
    return ll
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import six
import time
from datetime import datetime
from twisted.python.failure import Failure
from twisted.python import log
from twisted.internet import defer
from twisted.internet.interfaces import IStreamClientEndpoint
from zope.interface import implementer
from .interface import IRouterContainer, IStreamAttacher
from txtorcon.util import find_keywords, maybe_ip_addr, SingleObserver
# look like "2014-01-25T02:12:14.593772"
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
def _extract_reason(kw):
"""
Internal helper. Extracts a reason (possibly both reasons!) from
the kwargs for a circuit failed or closed event.
"""
try:
# we "often" have a REASON
reason = kw['REASON']
try:
# ...and sometimes even have a REMOTE_REASON
reason = '{}, {}'.format(reason, kw['REMOTE_REASON'])
except KeyError:
pass # should still be the 'REASON' error if we had it
except KeyError:
reason = "unknown"
return reason
@implementer(IStreamAttacher)
class _CircuitAttacher(object):
    """
    Internal helper.

    If we've ever called .stream_via or .web_agent, then one of these
    is added as "the" stream-attacher.
    """

    def __init__(self):
        # map (real_host (IPAddress), real_port) -> (circuit, Deferred)
        self._circuit_targets = dict()

    def add_endpoint(self, target_ep, circuit):
        """
        Returns a Deferred that fires when we've attached this endpoint to
        the provided circuit.
        """
        # This can seem a little .. convoluted. What's going on is
        # we're asking the TorCircuitEndpoint to tell us when it gets
        # the local address (i.e. when "whomever created the endpoint"
        # actually connects locally). We need this address to
        # successfully map incoming streams.
        d = defer.Deferred()
        target_ep._get_address().addCallback(self._add_real_target, circuit, d)
        return d

    def _add_real_target(self, real_addr, circuit, d):
        # joy oh joy, ipaddress wants unicode, Twisted gives us bytes...
        real_host = maybe_ip_addr(six.text_type(real_addr.host))
        real_port = real_addr.port
        # remember which circuit (and completion Deferred) the stream
        # originating from this local (host, port) should be attached to
        self._circuit_targets[(real_host, real_port)] = (circuit, d)

    def attach_stream_failure(self, stream, fail):
        """
        IStreamAttacher API

        Errback the pending attach Deferred for this stream's source
        address, if we were tracking it; otherwise silently ignore.
        """
        k = (stream.source_addr, stream.source_port)
        try:
            (circ, d) = self._circuit_targets.pop(k)
            d.errback(fail)
        except KeyError:
            pass
        # so this means ... we got an error, but on a stream we either
        # don't care about or already .callback()'d so should we log
        # it? or ignore?
        return None

    @defer.inlineCallbacks
    def attach_stream(self, stream, circuits):
        """
        IStreamAttacher API

        Attach the stream to the circuit registered for its local source
        address; unknown streams are left for Tor to handle normally
        (by returning without a value).
        """
        k = (stream.source_addr, stream.source_port)
        try:
            circuit, d = self._circuit_targets.pop(k)
        except KeyError:
            return
        try:
            yield circuit.when_built()
            # the circuit may have died between being built and now
            if circuit.state in ['FAILED', 'CLOSED', 'DETACHED']:
                d.errback(Failure(RuntimeError(
                    "Circuit {circuit.id} in state {circuit.state} so unusable".format(
                        circuit=circuit,
                    )
                )))
                return
            d.callback(None)
            defer.returnValue(circuit)
        except Exception:
            d.errback(Failure())
@defer.inlineCallbacks
def _get_circuit_attacher(reactor, state):
    """
    Internal helper. Lazily create the process-wide singleton
    _CircuitAttacher, register it once with the given TorState via
    set_attacher, and return it (through the inlineCallbacks Deferred).
    """
    if _get_circuit_attacher.attacher is None:
        _get_circuit_attacher.attacher = _CircuitAttacher()
        yield state.set_attacher(_get_circuit_attacher.attacher, reactor)
    defer.returnValue(_get_circuit_attacher.attacher)


# singleton storage, stashed as a function attribute so it survives calls
_get_circuit_attacher.attacher = None
@implementer(IStreamClientEndpoint)
class TorCircuitEndpoint(object):
    """
    An IStreamClientEndpoint that routes its connection over one
    particular, already-chosen Tor circuit, wrapping an underlying
    endpoint that performs the actual SOCKS connection.
    """

    def __init__(self, reactor, torstate, circuit, target_endpoint):
        self._reactor = reactor
        self._state = torstate
        self._target_endpoint = target_endpoint  # a TorClientEndpoint
        self._circuit = circuit

    @defer.inlineCallbacks
    def connect(self, protocol_factory):
        """IStreamClientEndpoint API"""
        # need to:
        # 1. add 'our' attacher to state
        # 2. do the "underlying" connect
        # 3. recognize our stream
        # 4. attach it to our circuit
        attacher = yield _get_circuit_attacher(self._reactor, self._state)
        # note that we'll only ever add an attacher once, and then it
        # stays there "forever". so if you never call the .stream_via
        # or .web_agent APIs, set_attacher won't get called .. but if
        # you *do*, then you can't call set_attacher yourself (because
        # that's an error). See discussion in set_attacher on
        # TorState or issue #169
        yield self._circuit.when_built()
        # start the SOCKS connect and the attach bookkeeping in parallel;
        # the attacher matches our stream by its local source address
        connect_d = self._target_endpoint.connect(protocol_factory)
        attached_d = attacher.add_endpoint(self._target_endpoint, self._circuit)
        proto = yield connect_d
        yield attached_d
        defer.returnValue(proto)
class Circuit(object):
    """
    Used by :class:`txtorcon.TorState` to represent one of Tor's circuits.

    This is kept up-to-date by the :class`txtorcon.TorState` that owns it, and
    individual circuits can be listened to for updates (or listen to
    every one using :meth:`txtorcon.TorState.add_circuit_listener`)

    :ivar path:
        contains a list of :class:`txtorcon.Router` objects
        representing the path this Circuit takes. Mostly this will be
        3 or 4 routers long. Note that internally Tor uses single-hop
        paths for some things. See also the *purpose*
        instance-variable.

    :ivar streams:
        contains a list of Stream objects representing all streams
        currently attached to this circuit.

    :ivar state:
        contains a string from Tor describing the current state of the
        stream. From control-spec.txt section 4.1.1, these are:
            - LAUNCHED: circuit ID assigned to new circuit
            - BUILT: all hops finished, can now accept streams
            - EXTENDED: one more hop has been completed
            - FAILED: circuit closed (was not built)
            - CLOSED: circuit closed (was built)

    :ivar purpose:
        The reason this circuit was built. For most purposes, you'll
        want to look at `GENERAL` circuits only. Values can currently
        be one of (but see control-spec.txt 4.1.1):
            - GENERAL
            - HS_CLIENT_INTRO
            - HS_CLIENT_REND
            - HS_SERVICE_INTRO
            - HS_SERVICE_REND
            - TESTING
            - CONTROLLER

    :ivar id: The ID of this circuit, a number (or None if unset).
    """

    def __init__(self, routercontainer):
        """
        :param routercontainer:
            should implement :class:`txtorcon.interface.IRouterContainer`.
        """
        self.listeners = []
        self.router_container = IRouterContainer(routercontainer)
        self._torstate = routercontainer  # XXX FIXME
        self.path = []
        self.streams = []
        self.purpose = None
        self.id = None
        self.state = 'UNKNOWN'
        self.build_flags = []
        self.flags = {}

        # this is used to hold a Deferred that will callback() when
        # this circuit is being CLOSED or FAILED.
        self._closing_deferred = None
        # XXX ^ should probably be when_closed() etc etc...

        # caches parsed value for time_created()
        self._time_created = None

        # all notifications for when_built, when_closed
        self._when_built = SingleObserver()
        self._when_closed = SingleObserver()

    # XXX backwards-compat for old .is_built for now
    @property
    def is_built(self):
        return self.when_built()

    def when_built(self):
        """
        Returns a Deferred that is callback()'d (with this Circuit
        instance) when this circuit hits BUILT.

        If it's already BUILT when this is called, you get an
        already-successful Deferred; otherwise, the state must change
        to BUILT.

        If the circuit will never hit BUILT (e.g. it is abandoned by
        Tor before it gets to BUILT) you will receive an errback
        """
        # XXX note to self: we never do an errback; fix this behavior
        if self.state == 'BUILT':
            return defer.succeed(self)
        return self._when_built.when_fired()

    def when_closed(self):
        """
        Returns a Deferred that callback()'s (with this Circuit instance)
        when this circuit hits CLOSED or FAILED.
        """
        if self.state in ['CLOSED', 'FAILED']:
            return defer.succeed(self)
        return self._when_closed.when_fired()

    def web_agent(self, reactor, socks_endpoint, pool=None):
        """
        :param socks_endpoint: create one with
            :meth:`txtorcon.TorConfig.create_socks_endpoint`. Can be a
            Deferred.

        :param pool: passed on to the Agent (as ``pool=``)
        """
        # local import because there isn't Agent stuff on some
        # platforms we support, so this will only error if you try
        # this on the wrong platform (pypy [??] and old-twisted)
        from txtorcon import web
        return web.tor_agent(
            reactor,
            socks_endpoint,
            circuit=self,
            pool=pool,
        )

    # XXX should make this API match above web_agent (i.e. pass a
    # socks_endpoint) or change the above...
    def stream_via(self, reactor, host, port,
                   socks_endpoint,
                   use_tls=False):
        """
        This returns an `IStreamClientEndpoint`_ that will connect to
        the given ``host``, ``port`` via Tor -- and via this
        parciular circuit.

        We match the streams up using their source-ports, so even if
        there are many streams in-flight to the same destination they
        will align correctly. For example, to cause a stream to go to
        ``torproject.org:443`` via a particular circuit::

            @inlineCallbacks
            def main(reactor):
                circ = yield torstate.build_circuit()  # lets Tor decide the path
                yield circ.when_built()
                tor_ep = circ.stream_via(reactor, 'torproject.org', 443)
                # 'factory' is for your protocol
                proto = yield tor_ep.connect(factory)

        Note that if you're doing client-side Web requests, you
        probably want to use `treq
        <http://treq.readthedocs.org/en/latest/>`_ or ``Agent``
        directly so call :meth:`txtorcon.Circuit.web_agent` instead.

        :param socks_endpoint: should be a Deferred firing a valid
            IStreamClientEndpoint pointing at a Tor SOCKS port (or an
            IStreamClientEndpoint already).

        .. _istreamclientendpoint: https://twistedmatrix.com/documents/current/api/twisted.internet.interfaces.IStreamClientEndpoint.html
        """
        from .endpoints import TorClientEndpoint
        ep = TorClientEndpoint(
            host, port, socks_endpoint,
            tls=use_tls,
            reactor=reactor,
        )
        return TorCircuitEndpoint(reactor, self._torstate, self, ep)

    @property
    def time_created(self):
        """
        A datetime (or None) parsed from Tor's TIME_CREATED flag;
        cached after the first successful parse.
        """
        if self._time_created is not None:
            return self._time_created
        if 'TIME_CREATED' in self.flags:
            # strip off milliseconds
            t = self.flags['TIME_CREATED'].split('.')[0]
            tstruct = time.strptime(t, TIME_FORMAT)
            # BUG FIX: previously used tstruct[:7], but struct_time's
            # 7th field is tm_wday which datetime() would interpret as
            # microseconds. Only year..second (the first six) apply.
            self._time_created = datetime(*tstruct[:6])
        return self._time_created

    def listen(self, listener):
        """Add an ICircuitListener for this circuit; duplicates ignored."""
        if listener not in self.listeners:
            self.listeners.append(listener)

    def unlisten(self, listener):
        """Remove a previously-added listener (raises if not present)."""
        self.listeners.remove(listener)

    def close(self, **kw):
        """
        This asks Tor to close the underlying circuit object. See
        :meth:`txtorcon.torstate.TorState.close_circuit`
        for details.

        You may pass keyword arguments to take care of any Flags Tor
        accepts for the CLOSECIRCUIT command. Currently, this is only
        "IfUnused". So for example: circ.close(IfUnused=True)

        :return: Deferred which callbacks with this Circuit instance
            ONLY after Tor has confirmed it is gone (not simply that the
            CLOSECIRCUIT command has been queued). This could be a while
            if you included IfUnused.
        """
        # we're already closed; nothing to do
        if self.state == 'CLOSED':
            return defer.succeed(None)

        # someone already called close() but we're not closed yet
        if self._closing_deferred:
            d = defer.Deferred()

            def closed(arg):
                d.callback(arg)
                return arg
            self._closing_deferred.addBoth(closed)
            return d

        # actually-close the circuit
        self._closing_deferred = defer.Deferred()

        def close_command_is_queued(*args):
            return self._closing_deferred
        d = self._torstate.close_circuit(self.id, **kw)
        d.addCallback(close_command_is_queued)
        return d

    def age(self, now=None):
        """
        Returns an integer which is the difference in seconds from
        'now' to when this circuit was created.

        Returns None if there is no created-time.
        """
        if not self.time_created:
            return None
        if now is None:
            now = datetime.utcnow()
        # NOTE(review): timedelta.seconds discards the days component,
        # so circuits older than one day under-report their age --
        # confirm whether total_seconds() was intended here.
        return (now - self.time_created).seconds

    def _create_flags(self, kw):
        """
        this clones the kw dict, adding a lower-case version of every
        key (duplicated in stream.py; put in util?)
        """
        flags = {}
        for k in kw.keys():
            flags[k] = kw[k]
            flags[k.lower()] = kw[k]
        return flags

    def update(self, args):
        """
        Process one CIRC event's arguments from Tor, updating our state
        and notifying listeners of new/launched/built/closed/failed
        transitions.
        """
        # print "Circuit.update:",args
        if self.id is None:
            self.id = int(args[0])
            for x in self.listeners:
                x.circuit_new(self)
        else:
            if int(args[0]) != self.id:
                raise RuntimeError("Update for wrong circuit.")
        self.state = args[1]

        kw = find_keywords(args)
        self.flags = kw
        if 'PURPOSE' in kw:
            self.purpose = kw['PURPOSE']
        if 'BUILD_FLAGS' in kw:
            self.build_flags = kw['BUILD_FLAGS'].split(',')

        if self.state == 'LAUNCHED':
            self.path = []
            for x in self.listeners:
                x.circuit_launched(self)
        else:
            if self.state != 'FAILED' and self.state != 'CLOSED':
                if len(args) > 2:
                    self.update_path(args[2].split(','))

        if self.state == 'BUILT':
            for x in self.listeners:
                x.circuit_built(self)
            self._when_built.fire(self)

        elif self.state == 'CLOSED':
            if len(self.streams) > 0:
                # it seems this can/does happen if a remote router
                # crashes or otherwise shuts down a circuit with
                # streams on it still .. also if e.g. you "carml circ
                # --delete " the circuit while the stream is
                # in-progress...can we do better than logging?
                # *should* we do anything else (the stream should get
                # closed still by Tor).
                log.msg(
                    "Circuit is {} but still has {} streams".format(
                        self.state, len(self.streams)
                    )
                )
            flags = self._create_flags(kw)
            self.maybe_call_closing_deferred()
            for x in self.listeners:
                x.circuit_closed(self, **flags)

        elif self.state == 'FAILED':
            if len(self.streams) > 0:
                log.err(RuntimeError("Circuit is %s but still has %d streams" %
                                     (self.state, len(self.streams))))
            flags = self._create_flags(kw)
            self.maybe_call_closing_deferred()
            for x in self.listeners:
                x.circuit_failed(self, **flags)

    def maybe_call_closing_deferred(self):
        """
        Used internally to callback on the _closing_deferred if it
        exists.
        """
        if self._closing_deferred:
            self._closing_deferred.callback(self)
            self._closing_deferred = None
        self._when_closed.fire(self)

    def update_path(self, path):
        """
        There are EXTENDED messages which don't include any routers at
        all, and any of the EXTENDED messages may have some arbitrary
        flags in them. So far, they're all upper-case and none start
        with $ luckily. The routers in the path should all be
        LongName-style router names (this depends on them starting
        with $).

        For further complication, it's possible to extend a circuit to
        a router which isn't in the consensus. nickm via #tor thought
        this might happen in the case of hidden services choosing a
        rendevouz point not in the current consensus.
        """
        oldpath_len = len(self.path)
        self.path = []
        for p in path:
            if p[0] != '$':
                break

            # this will create a Router if we give it a router
            # LongName that doesn't yet exist
            router = self.router_container.router_from_id(p)

            self.path.append(router)
            # if the path grew, notify listeners
            if len(self.path) > oldpath_len:
                for x in self.listeners:
                    x.circuit_extend(self, router)
                oldpath_len = len(self.path)

    def __str__(self):
        path = ' '.join([x.ip for x in self.path])
        return "<Circuit %d %s [%s] for %s>" % (self.id, self.state, path,
                                                self.purpose)
class CircuitBuildTimedOutError(Exception):
    """
    Raised by `build_timeout_circuit` when a circuit fails to finish
    building before the requested timeout elapses.
    """
def build_timeout_circuit(tor_state, reactor, path, timeout, using_guards=False):
    """
    Build a new circuit within a timeout.

    CircuitBuildTimedOutError will be raised unless we receive a
    circuit build result (success or failure) within the `timeout`
    duration.

    :returns: a Deferred which fires when the circuit build succeeds (or
        fails to build).
    """
    # single-element list used as a mutable cell so trap_cancel (below)
    # can see the circuit once build_circuit has delivered it
    timed_circuit = []
    d = tor_state.build_circuit(routers=path, using_guards=using_guards)

    def get_circuit(c):
        # stash the circuit and pass it along the callback chain
        timed_circuit.append(c)
        return c

    def trap_cancel(f):
        # only handle cancellation (triggered by the callLater below);
        # any other failure propagates. Close the half-built circuit if
        # we got one, then surface the timeout as our own error type.
        f.trap(defer.CancelledError)
        if timed_circuit:
            d2 = timed_circuit[0].close()
        else:
            d2 = defer.succeed(None)
        d2.addCallback(lambda _: Failure(CircuitBuildTimedOutError("circuit build timed out")))
        return d2

    d.addCallback(get_circuit)
    d.addCallback(lambda circ: circ.when_built())
    d.addErrback(trap_cancel)
    # cancel the whole chain if the build takes longer than `timeout`
    reactor.callLater(timeout, d.cancel)

    return d
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import fnmatch
import glob
import itertools
import os.path
import re
import six
from oslo_config import cfg
from oslo_log import log
from conveyor.conveyorheat.common import environment_format as env_fmt
from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.common import policy
from conveyor.conveyorheat.engine import support
from conveyor.i18n import _
from conveyor.i18n import _LE
from conveyor.i18n import _LI
from conveyor.i18n import _LW
LOG = log.getLogger(__name__)
# All hook points a resource breakpoint may be attached to, one for each
# side (pre/post) of the create, update and delete lifecycle actions.
HOOK_TYPES = (
    HOOK_PRE_CREATE, HOOK_PRE_UPDATE, HOOK_PRE_DELETE, HOOK_POST_CREATE,
    HOOK_POST_UPDATE, HOOK_POST_DELETE
) = (
    'pre-create', 'pre-update', 'pre-delete', 'post-create',
    'post-update', 'post-delete'
)

# Actions that may be forbidden on a resource via 'restricted_actions'.
RESTRICTED_ACTIONS = (UPDATE, REPLACE) = ('update', 'replace')
def valid_hook_type(hook):
    """Return True if *hook* is one of the recognized HOOK_TYPES."""
    return hook in HOOK_TYPES
def valid_restricted_actions(action):
    """Return True if *action* is one of the recognized RESTRICTED_ACTIONS."""
    return action in RESTRICTED_ACTIONS
def is_hook_definition(key, value):
    """Return True if (key, value) is a well-formed 'hooks' entry.

    *value* may be a single hook-type string or a sequence of them.

    :raises exception.InvalidBreakPointHook: if *key* is 'hooks' but
        *value* contains an unrecognized hook type.
    """
    is_valid_hook = False
    if key == 'hooks':
        # the string check must come first: a str is itself a Sequence
        if isinstance(value, six.string_types):
            is_valid_hook = valid_hook_type(value)
        elif isinstance(value, collections.Sequence):
            # NOTE(review): collections.Sequence moved to collections.abc
            # in Python 3.3 and was removed in 3.10 -- confirm supported
            # runtimes before upgrading Python.
            is_valid_hook = all(valid_hook_type(hook) for hook in value)

        if not is_valid_hook:
            msg = (_('Invalid hook type "%(value)s" for resource '
                     'breakpoint, acceptable hook types are: %(types)s') %
                   {'value': value, 'types': HOOK_TYPES})
            raise exception.InvalidBreakPointHook(message=msg)
    return is_valid_hook
def is_valid_restricted_action(key, value):
    """Return True if (key, value) is a well-formed 'restricted_actions' entry.

    *value* may be a single action string or a sequence of them.

    :raises exception.InvalidRestrictedAction: if *key* is
        'restricted_actions' but *value* contains an unknown action.
    """
    valid_action = False
    if key == 'restricted_actions':
        # the string check must come first: a str is itself a Sequence
        if isinstance(value, six.string_types):
            valid_action = valid_restricted_actions(value)
        elif isinstance(value, collections.Sequence):
            # NOTE(review): collections.Sequence was removed from the
            # collections module in Python 3.10 (use collections.abc) --
            # confirm supported runtimes.
            valid_action = all(valid_restricted_actions(
                action) for action in value)

        if not valid_action:
            msg = (_('Invalid restricted_action type "%(value)s" for '
                     'resource, acceptable restricted_action '
                     'types are: %(types)s') %
                   {'value': value, 'types': RESTRICTED_ACTIONS})
            raise exception.InvalidRestrictedAction(message=msg)
    return valid_action
class ResourceInfo(object):
    """Base mapping of resource type to implementation.

    Instantiating ResourceInfo directly dispatches (via __new__) to the
    appropriate subclass based on the registry path and value.
    """

    def __new__(cls, registry, path, value, **kwargs):
        """Create a new ResourceInfo of the appropriate class."""
        if cls != ResourceInfo:
            # Call is already for a subclass, so pass it through
            return super(ResourceInfo, cls).__new__(cls)

        name = path[-1]
        if name.endswith(('.yaml', '.template')):
            # a template url for the resource "Type"
            return TemplateResourceInfo(registry, path, value)
        elif not isinstance(value, six.string_types):
            return ClassResourceInfo(registry, path, value)
        elif value.endswith(('.yaml', '.template')):
            # a registered template
            return TemplateResourceInfo(registry, path, value)
        elif name.endswith('*'):
            return GlobResourceInfo(registry, path, value)
        else:
            return MapResourceInfo(registry, path, value)

    def __init__(self, registry, path, value):
        self.registry = registry
        self.path = path
        self.name = path[-1]
        self.value = value
        # user (environment) resources sort above global/system ones
        self.user_resource = True

    def __eq__(self, other):
        if other is None:
            return False
        return (self.path == other.path and
                self.value == other.value and
                self.user_resource == other.user_resource)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        if self.user_resource != other.user_resource:
            # user resource must be sorted above system ones.
            return self.user_resource > other.user_resource
        if len(self.path) != len(other.path):
            # more specific (longer) path must be sorted above system ones.
            return len(self.path) > len(other.path)
        return self.path < other.path

    def __gt__(self, other):
        return other.__lt__(self)

    def get_resource_info(self, resource_type=None, resource_name=None):
        return self

    def matches(self, resource_type):
        return False

    def get_class(self):
        # BUG FIX: this previously did `raise NotImplemented`, which
        # raises the NotImplemented *singleton* (not an exception class)
        # and produces a confusing TypeError on Python 3. The correct
        # "abstract method" marker is NotImplementedError.
        raise NotImplementedError

    def get_class_to_instantiate(self):
        return self.get_class()

    def __str__(self):
        return '[%s](User:%s) %s -> %s' % (self.description,
                                           self.user_resource,
                                           self.name, str(self.value))
class ClassResourceInfo(ResourceInfo):
    """Store the mapping of resource name to python class implementation."""
    description = 'Plugin'

    def get_class(self, files=None):
        # the registered value *is* the plugin class; `files` is ignored
        # but accepted for signature-compatibility with siblings
        return self.value
class TemplateResourceInfo(ResourceInfo):
    """Store the info needed to start a TemplateResource."""
    description = 'Template'

    def __init__(self, registry, path, value):
        super(TemplateResourceInfo, self).__init__(registry, path, value)
        # when the registry *name* is itself a template file, that name
        # is the template; otherwise the registered value points at it
        if self.name.endswith(('.yaml', '.template')):
            self.template_name = self.name
        else:
            self.template_name = value
        self.value = self.template_name

    def get_class(self, files=None):
        """Build a resource class from the template body.

        Prefers an in-memory body from `files`; otherwise fetches the
        template, restricting allowed URL schemes for user resources.
        """
        from conveyor.conveyorheat.engine.resources import template_resource
        if files and self.template_name in files:
            data = files[self.template_name]
        else:
            if self.user_resource:
                allowed_schemes = template_resource.REMOTE_SCHEMES
            else:
                allowed_schemes = template_resource.LOCAL_SCHEMES
            data = template_resource.TemplateResource.get_template_file(
                self.template_name,
                allowed_schemes)
        env = self.registry.environment
        return template_resource.generate_class_from_template(str(self.name),
                                                              data, env)

    def get_class_to_instantiate(self):
        # template-backed resources always instantiate the generic
        # TemplateResource wrapper (class generation happens later)
        from conveyor.conveyorheat.engine.resources import template_resource
        return template_resource.TemplateResource
class MapResourceInfo(ResourceInfo):
    """Store the mapping of one resource type to another.

    like: OS::Networking::FloatingIp -> OS::Neutron::FloatingIp
    """
    description = 'Mapping'

    def get_class(self, files=None):
        # a pure alias has no implementation class of its own
        return None

    def get_resource_info(self, resource_type=None, resource_name=None):
        # delegate to whatever the aliased type resolves to
        return self.registry.get_resource_info(self.value, resource_name)
class GlobResourceInfo(MapResourceInfo):
    """Store the mapping (with wild cards) of one resource type to another.

    like: OS::Networking::* -> OS::Neutron::*

    Also supports many-to-one mapping (mostly useful together with special
    "OS::Heat::None" resource)

    like: OS::* -> OS::Heat::None
    """
    description = 'Wildcard Mapping'

    def get_resource_info(self, resource_type=None, resource_name=None):
        # NOTE(pas-ha) we end up here only when self.name already
        # ends with * so truncate it
        orig_prefix = self.name[:-1]
        if self.value.endswith('*'):
            # substitute the matched suffix into the target pattern
            new_type = self.value[:-1] + resource_type[len(orig_prefix):]
        else:
            # many-to-one: everything maps to one concrete type
            new_type = self.value
        return self.registry.get_resource_info(new_type, resource_name)

    def matches(self, resource_type):
        # prevent self-recursion in case of many-to-one mapping
        match = (resource_type != self.value and
                 resource_type.startswith(self.name[:-1]))
        return match
class ResourceRegistry(object):
"""By looking at the environment, find the resource implementation."""
def __init__(self, global_registry, env):
self._registry = {'resources': {}}
self.global_registry = global_registry
self.environment = env
def load(self, json_snippet):
self._load_registry([], json_snippet)
def register_class(self, resource_type, resource_class, path=None):
if path is None:
path = [resource_type]
ri = ResourceInfo(self, path, resource_class)
self._register_info(path, ri)
def _load_registry(self, path, registry):
for k, v in iter(registry.items()):
if v is None:
self._register_info(path + [k], None)
elif is_hook_definition(k, v) or is_valid_restricted_action(k, v):
self._register_item(path + [k], v)
elif isinstance(v, dict):
self._load_registry(path + [k], v)
else:
self._register_info(path + [k],
ResourceInfo(self, path + [k], v))
def _register_item(self, path, item):
name = path[-1]
registry = self._registry
for key in path[:-1]:
if key not in registry:
registry[key] = {}
registry = registry[key]
registry[name] = item
def _register_info(self, path, info):
"""Place the new info in the correct location in the registry.
:param path: a list of keys ['resources', 'my_srv', 'OS::Nova::Server']
"""
descriptive_path = '/'.join(path)
name = path[-1]
# create the structure if needed
registry = self._registry
for key in path[:-1]:
if key not in registry:
registry[key] = {}
registry = registry[key]
if info is None:
if name.endswith('*'):
# delete all matching entries.
for res_name in list(six.iterkeys(registry)):
if (isinstance(registry[res_name], ResourceInfo) and
res_name.startswith(name[:-1])):
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
'item': res_name,
'path': descriptive_path})
del registry[res_name]
else:
# delete this entry.
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
'item': name,
'path': descriptive_path})
registry.pop(name, None)
return
if name in registry and isinstance(registry[name], ResourceInfo):
if registry[name] == info:
return
details = {
'path': descriptive_path,
'was': str(registry[name].value),
'now': str(info.value)}
LOG.warning(_LW('Changing %(path)s from %(was)s to %(now)s'),
details)
if isinstance(info, ClassResourceInfo):
if info.value.support_status.status != support.SUPPORTED:
if info.value.support_status.message is not None:
details = {
'name': info.name,
'status': six.text_type(
info.value.support_status.status),
'message': six.text_type(
info.value.support_status.message)
}
LOG.warning(_LW('%(name)s is %(status)s. %(message)s'),
details)
info.user_resource = (self.global_registry is not None)
registry[name] = info
def log_resource_info(self, show_all=False, prefix=None):
registry = self._registry
prefix = '%s ' % prefix if prefix is not None else ''
for name in registry:
if name == 'resources':
continue
if show_all or isinstance(registry[name], TemplateResourceInfo):
msg = (_LI('%(p)sRegistered: %(t)s') %
{'p': prefix,
't': six.text_type(registry[name])})
LOG.info(msg)
def remove_item(self, info):
if not isinstance(info, TemplateResourceInfo):
return
registry = self._registry
for key in info.path[:-1]:
registry = registry[key]
if info.path[-1] in registry:
registry.pop(info.path[-1])
def get_rsrc_restricted_actions(self, resource_name):
"""Returns a set of restricted actions.
For a given resource we get the set of restricted actions.
Actions are set in this format via `resources`:
{
"restricted_actions": [update, replace]
}
A restricted_actions value is either `update`, `replace` or a list
of those values. Resources support wildcard matching. The asterisk
sign matches everything.
"""
ress = self._registry['resources']
restricted_actions = set()
for name_pattern, resource in six.iteritems(ress):
if fnmatch.fnmatchcase(resource_name, name_pattern):
if 'restricted_actions' in resource:
actions = resource['restricted_actions']
if isinstance(actions, six.string_types):
restricted_actions.add(actions)
elif isinstance(actions, collections.Sequence):
restricted_actions |= set(actions)
return restricted_actions
def matches_hook(self, resource_name, hook):
"""Return whether a resource have a hook set in the environment.
For a given resource and a hook type, we check to see if the passed
group of resources has the right hook associated with the name.
Hooks are set in this format via `resources`:
{
"res_name": {
"hooks": [pre-create, pre-update]
},
"*_suffix": {
"hooks": pre-create
},
"prefix_*": {
"hooks": pre-update
}
}
A hook value is either `pre-create`, `pre-update` or a list of those
values. Resources support wildcard matching. The asterisk sign matches
everything.
"""
ress = self._registry['resources']
for name_pattern, resource in six.iteritems(ress):
if fnmatch.fnmatchcase(resource_name, name_pattern):
if 'hooks' in resource:
hooks = resource['hooks']
if isinstance(hooks, six.string_types):
if hook == hooks:
return True
elif isinstance(hooks, collections.Sequence):
if hook in hooks:
return True
return False
def remove_resources_except(self, resource_name):
ress = self._registry['resources']
new_resources = {}
for name, res in six.iteritems(ress):
if fnmatch.fnmatchcase(resource_name, name):
new_resources.update(res)
if resource_name in ress:
new_resources.update(ress[resource_name])
self._registry['resources'] = new_resources
def iterable_by(self, resource_type, resource_name=None):
is_templ_type = resource_type.endswith(('.yaml', '.template'))
if self.global_registry is not None and is_templ_type:
# we only support dynamic resource types in user environments
# not the global environment.
# resource with a Type == a template
# we dynamically create an entry as it has not been registered.
if resource_type not in self._registry:
res = ResourceInfo(self, [resource_type], None)
self._register_info([resource_type], res)
yield self._registry[resource_type]
# handle a specific resource mapping.
if resource_name:
impl = self._registry['resources'].get(resource_name)
if impl and resource_type in impl:
yield impl[resource_type]
# handle: "OS::Nova::Server" -> "Rackspace::Cloud::Server"
impl = self._registry.get(resource_type)
if impl:
yield impl
# handle: "OS::*" -> "Dreamhost::*"
def is_a_glob(resource_type):
return resource_type.endswith('*')
globs = six.moves.filter(is_a_glob, six.iterkeys(self._registry))
for pattern in globs:
if self._registry[pattern].matches(resource_type):
yield self._registry[pattern]
def get_resource_info(self, resource_type, resource_name=None,
registry_type=None, ignore=None):
"""Find possible matches to the resource type and name.
Chain the results from the global and user registry to find
a match.
"""
# use cases
# 1) get the impl.
# - filter_by(res_type=X), sort_by(res_name=W, is_user=True)
# 2) in TemplateResource we need to get both the
# TemplateClass and the ResourceClass
# - filter_by(res_type=X, impl_type=TemplateResourceInfo),
# sort_by(res_name=W, is_user=True)
# - filter_by(res_type=X, impl_type=ClassResourceInfo),
# sort_by(res_name=W, is_user=True)
# 3) get_types() from the api
# - filter_by(is_user=False)
# 4) as_dict() to write to the db
# - filter_by(is_user=True)
if self.global_registry is not None:
giter = self.global_registry.iterable_by(resource_type,
resource_name)
else:
giter = []
matches = itertools.chain(self.iterable_by(resource_type,
resource_name),
giter)
for info in sorted(matches):
try:
match = info.get_resource_info(resource_type,
resource_name)
except exception.EntityNotFound:
continue
if registry_type is None or isinstance(match, registry_type):
if ignore is not None and match == ignore:
continue
# NOTE(prazumovsky): if resource_type defined in outer env
# there is a risk to lose it due to h-eng restarting, so
# store it to local env (exclude ClassResourceInfo because it
# loads from resources; TemplateResourceInfo handles by
# template_resource module).
if (match and not match.user_resource and
not isinstance(info, (TemplateResourceInfo,
ClassResourceInfo))):
self._register_info([resource_type], info)
return match
raise exception.EntityNotFound(entity='Resource Type',
name=resource_type)
def get_class(self, resource_type, resource_name=None, files=None):
info = self.get_resource_info(resource_type,
resource_name=resource_name)
return info.get_class(files=files)
def get_class_to_instantiate(self, resource_type, resource_name=None):
if resource_type == "":
msg = _('Resource "%s" has no type') % resource_name
raise exception.StackValidationFailed(message=msg)
elif resource_type is None:
msg = _('Non-empty resource type is required '
'for resource "%s"') % resource_name
raise exception.StackValidationFailed(message=msg)
elif not isinstance(resource_type, six.string_types):
msg = _('Resource "%s" type is not a string') % resource_name
raise exception.StackValidationFailed(message=msg)
try:
info = self.get_resource_info(resource_type,
resource_name=resource_name)
except exception.EntityNotFound as exc:
raise exception.StackValidationFailed(message=six.text_type(exc))
return info.get_class_to_instantiate()
def as_dict(self):
    """Return user resources in a dict format.

    Walks the nested registry structure and keeps only entries that are
    user-supplied: nested dicts are recursed into, hook definitions and
    restricted actions are kept verbatim, and registry info objects are
    reduced to their raw ``value`` when flagged as user resources.
    """
    def _as_dict(level):
        # Recursively convert one registry level to a plain dict.
        tmp = {}
        # Idiom fix: level.items() is already iterable; the iter() wrapper
        # was redundant.
        for k, v in level.items():
            if isinstance(v, dict):
                tmp[k] = _as_dict(v)
            elif is_hook_definition(
                    k, v) or is_valid_restricted_action(k, v):
                tmp[k] = v
            elif v.user_resource:
                tmp[k] = v.value
        return tmp

    return _as_dict(self._registry)
def get_types(self,
              cnxt=None,
              support_status=None,
              type_name=None,
              version=None):
    """Return a list of valid resource types.

    A type is included only if it passes every predicate below: it is a
    concrete registry entry, its name matches *type_name* (regex), its
    support status/version match the requested ones, it is not HIDDEN,
    the backing service is available, and policy allows it for *cnxt*.
    """
    # validate the support status
    if support_status is not None and not support.is_valid_status(
            support_status):
        msg = (_('Invalid support status and should be one of %s') %
               six.text_type(support.SUPPORT_STATUSES))
        raise exception.Invalid(reason=msg)

    def is_resource(key):
        # Only concrete class/template entries count as resource types
        # (excludes mappings and glob entries in the registry).
        return isinstance(self._registry[key], (ClassResourceInfo,
                                                TemplateResourceInfo))

    def status_matches(cls):
        return (support_status is None or
                cls.get_class().support_status.status ==
                support_status)

    def is_available(cls):
        # Without a request context we cannot check service availability,
        # so assume available; any plugin error counts as unavailable.
        if cnxt is None:
            return True
        try:
            return cls.get_class().is_service_available(cnxt)
        except Exception:
            return False

    def not_hidden_matches(cls):
        return cls.get_class().support_status.status != support.HIDDEN

    def is_allowed(enforcer, name):
        # Policy check; absent a context everything is allowed.
        if cnxt is None:
            return True
        try:
            enforcer.enforce(cnxt, name)
        except enforcer.exc:
            return False
        else:
            return True
    enforcer = policy.ResourceEnforcer()

    def name_matches(name):
        # type_name is a user-supplied regex; any error (bad pattern or
        # bad type) is deliberately treated as "no match".
        try:
            return type_name is None or re.match(type_name, name)
        except:  # noqa
            return False

    def version_matches(cls):
        return (version is None or
                cls.get_class().support_status.version == version)

    return [name for name, cls in six.iteritems(self._registry)
            if (is_resource(name) and
                name_matches(name) and
                status_matches(cls) and
                is_available(cls) and
                is_allowed(enforcer, name) and
                not_hidden_matches(cls) and
                version_matches(cls))]
class Environment(object):
    """Holds parameters, the resource registry and event sinks for a stack."""

    def __init__(self, env=None, user_env=True):
        """Create an Environment from an input dict.

        The dict may be in one of two formats:
        1) old-school flat parameters; or
        2) newer {resource_registry: bla, parameters: foo}

        :param env: the json environment
        :param user_env: boolean, if False then we manage python resources too.
        """
        if env is None:
            env = {}
        if user_env:
            # User environments layer on top of the process-wide global
            # environment so built-in resource types stay resolvable.
            # Imported here to avoid a circular import at module load.
            from conveyor.conveyorheat.engine import resources
            global_env = resources.global_env()
            global_registry = global_env.registry
            event_sink_classes = global_env.event_sink_classes
        else:
            global_registry = None
            event_sink_classes = {}

        self.registry = ResourceRegistry(global_registry, self)
        self.registry.load(env.get(env_fmt.RESOURCE_REGISTRY, {}))

        self.param_defaults = env.get(env_fmt.PARAMETER_DEFAULTS, {})
        self.encrypted_param_names = env.get(env_fmt.ENCRYPTED_PARAM_NAMES, [])

        if env_fmt.PARAMETERS in env:
            self.params = env[env_fmt.PARAMETERS]
        else:
            # Old-style flat format: everything that is not a known
            # section name is treated as a parameter.
            self.params = dict((k, v) for (k, v) in six.iteritems(env)
                               if k not in (env_fmt.PARAMETER_DEFAULTS,
                                            env_fmt.ENCRYPTED_PARAM_NAMES,
                                            env_fmt.EVENT_SINKS,
                                            env_fmt.RESOURCE_REGISTRY))
        self.event_sink_classes = event_sink_classes
        self._event_sinks = []
        self._built_event_sinks = []
        self._update_event_sinks(env.get(env_fmt.EVENT_SINKS, []))
        self.constraints = {}
        self.stack_lifecycle_plugins = []

    def load(self, env_snippet):
        # Merge another environment snippet into this one; later loads
        # take precedence for registry entries and parameter values.
        self.registry.load(env_snippet.get(env_fmt.RESOURCE_REGISTRY, {}))
        self.params.update(env_snippet.get(env_fmt.PARAMETERS, {}))
        self.param_defaults.update(
            env_snippet.get(env_fmt.PARAMETER_DEFAULTS, {}))
        self._update_event_sinks(env_snippet.get(env_fmt.EVENT_SINKS, []))

    def user_env_as_dict(self):
        """Get the environment as a dict, ready for storing in the db."""
        return {env_fmt.RESOURCE_REGISTRY: self.registry.as_dict(),
                env_fmt.PARAMETERS: self.params,
                env_fmt.PARAMETER_DEFAULTS: self.param_defaults,
                env_fmt.ENCRYPTED_PARAM_NAMES: self.encrypted_param_names,
                env_fmt.EVENT_SINKS: self._event_sinks}

    def register_class(self, resource_type, resource_class, path=None):
        self.registry.register_class(resource_type, resource_class, path=path)

    def register_constraint(self, constraint_name, constraint):
        self.constraints[constraint_name] = constraint

    def register_stack_lifecycle_plugin(self, stack_lifecycle_name,
                                        stack_lifecycle_class):
        self.stack_lifecycle_plugins.append((stack_lifecycle_name,
                                             stack_lifecycle_class))

    def register_event_sink(self, event_sink_name, event_sink_class):
        self.event_sink_classes[event_sink_name] = event_sink_class

    def get_class(self, resource_type, resource_name=None, files=None):
        # Thin delegation to the registry.
        return self.registry.get_class(resource_type, resource_name,
                                       files=files)

    def get_class_to_instantiate(self, resource_type, resource_name=None):
        return self.registry.get_class_to_instantiate(resource_type,
                                                      resource_name)

    def get_types(self,
                  cnxt=None,
                  support_status=None,
                  type_name=None,
                  version=None):
        return self.registry.get_types(cnxt,
                                       support_status=support_status,
                                       type_name=type_name,
                                       version=version)

    def get_resource_info(self, resource_type, resource_name=None,
                          registry_type=None, ignore=None):
        return self.registry.get_resource_info(resource_type, resource_name,
                                               registry_type, ignore=ignore)

    def get_constraint(self, name):
        return self.constraints.get(name)

    def get_stack_lifecycle_plugins(self):
        return self.stack_lifecycle_plugins

    def _update_event_sinks(self, sinks):
        # Keep the raw sink definitions (for serialization) and also build
        # concrete sink objects for runtime dispatch.
        self._event_sinks.extend(sinks)
        for sink in sinks:
            sink = sink.copy()
            sink_class = sink.pop('type')
            sink_class = self.event_sink_classes[sink_class]
            self._built_event_sinks.append(sink_class(**sink))

    def get_event_sinks(self):
        return self._built_event_sinks
def get_child_environment(parent_env, child_params, item_to_remove=None,
                          child_resource_name=None):
    """Build a child environment using the parent environment and params.

    This is built from the child_params and the parent env so some
    resources can use user-provided parameters as if they come from an
    environment.

    1. resource_registry must be merged (child env should be loaded after the
       parent env to take precedence).
    2. child parameters must overwrite the parent's as they won't be relevant
       in the child template.

    If `child_resource_name` is provided, resources in the registry will be
    replaced with the contents of the matching child resource plus anything
    that passes a wildcard match.
    """
    def is_flat_params(env_or_param):
        # A plain parameter mapping has none of the known section keys.
        if env_or_param is None:
            return False
        return not any(sect in env_or_param for sect in env_fmt.SECTIONS)

    # Start from the parent environment, but drop its parameters: the
    # child's parameters must fully replace them.
    base_env = parent_env.user_env_as_dict()
    base_env[env_fmt.PARAMETERS] = {}

    flat = is_flat_params(child_params)
    result = Environment()
    if flat and child_params is not None:
        base_env[env_fmt.PARAMETERS] = child_params
    result.load(base_env)
    if child_params is not None and not flat:
        # Structured child params are a full environment snippet; load it
        # after the parent so it takes precedence.
        result.load(child_params)

    if item_to_remove is not None:
        result.registry.remove_item(item_to_remove)
    if child_resource_name:
        result.registry.remove_resources_except(child_resource_name)
    return result
def read_global_environment(env, env_dir=None):
    """Load every environment file from *env_dir* into *env*.

    When no directory is given, the configured ``environment_dir`` option
    is used. Per-file parse/read errors are logged and skipped so that one
    bad file does not prevent the rest from loading.
    """
    if env_dir is None:
        cfg.CONF.import_opt('environment_dir',
                            'conveyor.conveyorheat.common.config')
        env_dir = cfg.CONF.environment_dir

    try:
        env_files = glob.glob(os.path.join(env_dir, '*'))
    except OSError as osex:
        # Directory unreadable: log and give up entirely.
        LOG.error(_LE('Failed to read %s'), env_dir)
        LOG.exception(osex)
        return

    for file_path in env_files:
        try:
            with open(file_path) as env_fd:
                LOG.info(_LI('Loading %s'), file_path)
                env_body = env_fmt.parse(env_fd.read())
                env_fmt.default_for_missing(env_body)
                env.load(env_body)
        except ValueError as vex:
            # Parse failure: log and continue with the next file.
            LOG.error(_LE('Failed to parse %(file_path)s'), {
                'file_path': file_path})
            LOG.exception(vex)
        except IOError as ioex:
            LOG.error(_LE('Failed to read %(file_path)s'), {
                'file_path': file_path})
            LOG.exception(ioex)
| |
from django.views.generic import DetailView, TemplateView, CreateView, View, FormView
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseRedirect
from django.db.models import Q, Sum, F, Min
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.urls import reverse
from django.utils.translation import gettext_lazy as _, gettext
from django.utils import timezone
from django.contrib.messages.views import SuccessMessageMixin
from django.forms.models import model_to_dict
from datetime import datetime
from dateutil.relativedelta import relativedelta
import unicodecsv as csv
from calendar import month_name
from urllib.parse import unquote_plus
from braces.views import PermissionRequiredMixin, StaffuserRequiredMixin, UserFormKwargsMixin
from collections import OrderedDict
from itertools import chain
import re
from danceschool.core.models import Instructor, Location, Event, StaffMember, EventStaffCategory
from danceschool.core.constants import getConstant
from danceschool.core.mixins import StaffMemberObjectMixin, FinancialContextMixin, AdminSuccessURLMixin
from danceschool.core.utils.timezone import ensure_timezone, ensure_localtime
from danceschool.core.utils.requests import getIntFromGet, getDateTimeFromGet
from .models import ExpenseItem, RevenueItem, ExpenseCategory, RevenueCategory, RepeatedExpenseRule, StaffMemberWageInfo
from .helpers import (
prepareFinancialStatement, getExpenseItemsCSV, getRevenueItemsCSV, prepareStatementByPeriod,
prepareStatementByEvent, createExpenseItemsForEvents, createExpenseItemsForVenueRental, createGenericExpenseItems,
createRevenueItemsForRegistrations
)
from .forms import (
ExpenseReportingForm, RevenueReportingForm, CompensationRuleUpdateForm,
CompensationRuleResetForm, ExpenseRuleGenerationForm
)
from .constants import EXPENSE_BASES
class ExpenseReportingView(
    AdminSuccessURLMixin, StaffuserRequiredMixin, UserFormKwargsMixin,
    SuccessMessageMixin, CreateView
):
    """Staff-only form view for submitting a new expense item."""

    template_name = 'cms/forms/display_crispy_form_classbased_admin.html'
    form_class = ExpenseReportingForm
    success_message = _('Expense item successfully submitted.')

    def get_context_data(self, **kwargs):
        # Supply the title and description used by the generic form template.
        context = super().get_context_data(**kwargs)
        context['form_title'] = _('Report Expenses')
        context['form_description'] = _('Use this form to report expenses.')
        return context
class RevenueReportingView(
    AdminSuccessURLMixin, StaffuserRequiredMixin, UserFormKwargsMixin,
    SuccessMessageMixin, CreateView
):
    """Staff-only form view for submitting a new revenue item."""

    template_name = 'cms/forms/display_crispy_form_classbased_admin.html'
    form_class = RevenueReportingForm
    success_message = _('Revenue item successfully submitted.')

    def get_context_data(self, **kwargs):
        # Supply the title and description used by the generic form template.
        context = super().get_context_data(**kwargs)
        context['form_title'] = _('Report Revenues')
        context['form_description'] = _('Use this form to report revenues.')
        return context
class StaffMemberPaymentsView(StaffMemberObjectMixin, PermissionRequiredMixin, DetailView):
    """
    Display (or export as CSV) the expense/payment history of a staff member,
    optionally restricted to a single year.
    """
    model = StaffMember
    template_name = 'financial/staffmember_payments.html'
    permission_required = 'core.view_own_instructor_finances'
    as_csv = False

    def get_context_data(self, **kwargs):
        staff_member = self.object
        context = {}
        query_filter = Q()

        # These will be passed to the template
        year = self.kwargs.get('year')
        eligible_years = list(set([
            x.year for x in ExpenseItem.objects.values_list(
                'accrualDate', flat=True
            ).distinct()
        ]))
        eligible_years.sort(reverse=True)

        if not year or year == 'all':
            int_year = None
            year = 'all'
        else:
            try:
                int_year = int(year)
                # Check for year in kwargs and ensure that it is eligible
                if int_year not in eligible_years:
                    raise Http404(_("Invalid year."))
                # Match if the item was accrued, paid, or submitted that year.
                query_filter = query_filter & (
                    Q(accrualDate__year=int_year) |
                    Q(paymentDate__year=int_year) |
                    Q(submissionDate__year=int_year)
                )
            except (ValueError, TypeError):
                raise Http404(_("Invalid year."))

        # No point in continuing if we can't actually match this staff member to their payments.
        if not hasattr(staff_member, 'userAccount'):
            return super(DetailView, self).get_context_data(staff_member=staff_member)

        # BUGFIX: default the inner getattr to None so that a staff member
        # without a related transactionparty falls through to the empty
        # queryset instead of raising AttributeError (the 3-arg form is
        # already used in render_to_csv below).
        all_payments = getattr(
            getattr(staff_member, 'transactionparty', None),
            'expenseitem_set',
            ExpenseItem.objects.none()
        ).filter(query_filter).order_by('-submissionDate')

        paid_items = all_payments.filter(
            paid=True, reimbursement=False
        ).order_by('-paymentDate')
        unpaid_items = all_payments.filter(paid=False).order_by('-submissionDate')
        reimbursement_items = all_payments.filter(
            paid=True, reimbursement=True
        ).order_by('-paymentDate')

        # "This year" means the requested year, or the current calendar year
        # when viewing all years.
        if int_year:
            time_lb = ensure_timezone(datetime(int_year, 1, 1, 0, 0))
            time_ub = ensure_timezone(datetime(int_year + 1, 1, 1, 0, 0))
        else:
            time_lb = ensure_timezone(datetime(timezone.now().year, 1, 1, 0, 0))
            time_ub = ensure_timezone(datetime(timezone.now().year + 1, 1, 1, 0, 0))

        paid_this_year = paid_items.filter(
            paymentDate__gte=time_lb, paymentDate__lt=time_ub
        ).order_by('-paymentDate')
        accrued_paid_this_year = paid_items.filter(
            accrualDate__gte=time_lb, accrualDate__lt=time_ub
        ).order_by('-paymentDate')
        reimbursements_this_year = all_payments.filter(
            paymentDate__gte=time_lb, paymentDate__lt=time_ub,
            paid=True, reimbursement=True
        )

        context.update({
            'instructor': staff_member,  # DEPRECATED
            'staff_member': staff_member,
            'current_year': year,
            'eligible_years': eligible_years,
            'all_payments': all_payments,
            'paid_items': paid_items,
            'unpaid_items': unpaid_items,
            'reimbursement_items': reimbursement_items,
            'paid_this_year': paid_this_year,
            'accrued_paid_this_year': accrued_paid_this_year,
            'reimbursements_this_year': reimbursements_this_year,
            # filter(None, ...) drops null totals before summing.
            'total_paid_alltime': sum(filter(None, [x.total for x in paid_items])),
            'total_awaiting_payment': sum(filter(None, [x.total for x in unpaid_items])),
            'total_paid_this_year': sum(filter(None, [x.total for x in paid_this_year])),
            'total_reimbursements': sum(filter(None, [x.total for x in reimbursements_this_year])),
        })

        # Note: This get the detailview's context, not all the mixins. Supering itself led to an infinite loop.
        return super(DetailView, self).get_context_data(**context)

    def dispatch(self, request, *args, **kwargs):
        # URLconf passes as_csv=True on the CSV export route.
        if 'as_csv' in kwargs:
            self.as_csv = True
        return super().dispatch(request, *args, **kwargs)

    def render_to_response(self, context, **response_kwargs):
        if self.as_csv:
            return self.render_to_csv(context)
        return super().render_to_response(context, **response_kwargs)

    def render_to_csv(self, context):
        staff_member = context['staff_member']
        if hasattr(getattr(staff_member, 'transactionparty', None), 'expenseitem_set'):
            # ROBUSTNESS: the key is absent when the member has no
            # userAccount; fall back to an empty queryset instead of
            # raising KeyError.
            all_expenses = context.get('all_payments', ExpenseItem.objects.none())
        else:
            all_expenses = ExpenseItem.objects.none()
        return getExpenseItemsCSV(all_expenses, scope='instructor')
class OtherStaffMemberPaymentsView(StaffMemberPaymentsView):
    """Payment history view for a staff member other than the requester."""

    permission_required = 'core.view_other_instructor_finances'

    def get_object(self, queryset=None):
        if 'first_name' not in self.kwargs or 'last_name' not in self.kwargs:
            return None
        # A lone '_' in the URL stands for an empty name component;
        # remaining underscores decode to spaces.
        fname = re.sub('^_$', '', self.kwargs['first_name'])
        lname = re.sub('^_$', '', self.kwargs['last_name'])
        return get_object_or_404(
            StaffMember.objects.filter(
                firstName=unquote_plus(fname).replace('_', ' '),
                lastName=unquote_plus(lname).replace('_', ' ')
            )
        )
class FinancesByEventView(PermissionRequiredMixin, TemplateView):
    """
    Paginated financial statement broken out by event, with CSV export.
    """
    permission_required = 'financial.view_finances_byevent'
    cache_timeout = 3600
    template_name = 'financial/finances_byevent.html'
    as_csv = False
    paginate_by = 25

    def get_paginate_by(self, queryset=None):
        # The CSV export should effectively include everything.
        if self.as_csv:
            return 1000
        else:
            return self.paginate_by

    def get_context_data(self, **kwargs):
        context = {}

        # These will be passed to the template
        year = self.kwargs.get('year')
        eligible_years = list(set(
            [
                x.year for x in
                ExpenseItem.objects.values_list('accrualDate', flat=True).distinct()
            ]
        ))
        eligible_years.sort(reverse=True)

        if not year or year == 'all':
            int_year = None
            year = 'all'
        else:
            try:
                int_year = int(year)
                # Check for year in kwargs and ensure that it is eligible
                if int_year not in eligible_years:
                    raise Http404(_("Invalid year."))
            except (ValueError, TypeError):
                raise Http404(_("Invalid year."))

        context['current_year'] = year
        context['eligible_years'] = eligible_years

        page = self.kwargs.get('page') or self.request.GET.get('page') or 1
        context['statement'] = prepareFinancialStatement(year=int_year)
        paginator, page_obj, statementByEvent, is_paginated = prepareStatementByEvent(
            year=int_year, page=page, paginate_by=self.get_paginate_by()
        )
        context.update({
            'paginator': paginator,
            'page_obj': page_obj,
            'is_paginated': is_paginated,
        })
        context['statement']['statementByEvent'] = statementByEvent

        # Get a list of all roles with positive registrations in the statement:
        role_set = set()
        for x in statementByEvent:
            role_set.update(list(x.get('registrations').keys()))
        # BUGFIX: the original called sorted() and discarded its return
        # value, leaving the roles in arbitrary set order. Sort here,
        # placing None last.
        role_list = sorted(role_set, key=lambda x: (x is None, x))
        context['roles'] = role_list

        return super().get_context_data(**context)

    def dispatch(self, request, *args, **kwargs):
        # URLconf passes as_csv=True on the CSV export route.
        if 'as_csv' in kwargs:
            self.as_csv = True
        return super().dispatch(request, *args, **kwargs)

    def render_to_response(self, context, **response_kwargs):
        if self.as_csv:
            return self.render_to_csv(context)
        return super().render_to_response(context, **response_kwargs)

    def render_to_csv(self, context):
        statement = context['statement']
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="financialStatementByEvent.csv"'

        # Collect every role appearing in the statement so columns line up.
        roles = set()
        for y in statement['statementByEvent']:
            roles.update(list(y.get('registrations', {}).keys()))

        writer = csv.writer(response, csv.excel)
        response.write(u'\ufeff'.encode('utf8'))  # BOM (optional...Excel needs it to open UTF-8 file properly)

        header_list = [
            _('Event'),
            _('Month'),
        ]
        for role in roles:
            header_list.append(
                _('Registrations: {role}'.format(role=str(role or _('Unspecified')).title()))
            )
        header_list += [
            _('Revenues: Gross'),
            _('Revenues: Net'),
            _('Expenses: Instruction'),
            _('Expenses: Venue'),
            _('Expenses: Other'),
            _('Expenses: Total'),
            _('Net Profit'),
        ]
        writer.writerow(header_list)

        for x in statement['statementByEvent']:
            this_row_data = [
                x['event_name'],
                x['month_name'],
            ]
            # Iterate the same `roles` set as the header so columns match.
            for role in roles:
                this_row_data.append(x.get('registrations', {}).get(role, 0))
            this_row_data += [
                x['revenues']['gross'],
                x['revenues']['net'],
                x['expenses']['instruction'],
                x['expenses']['venue'],
                x['expenses']['other'],
                x['expenses']['total'],
                x['net_profit'],
            ]
            writer.writerow(this_row_data)
        return response
class FinancesByPeriodView(PermissionRequiredMixin, TemplateView):
    """
    Base view for financial statements aggregated by a time period.
    Subclasses set period_type ('month'/'date') and the named base views.
    """
    permission_required = 'financial.view_finances_bymonth'
    cache_timeout = 3600
    template_name = 'financial/finances_byperiod.html'
    as_csv = False
    paginate_by = 24
    period_type = None
    base_view = None
    base_view_csv = None

    def get_paginate_by(self, queryset=None):
        # The CSV export should effectively include everything.
        if self.as_csv:
            return 1000
        else:
            return self.paginate_by

    def get(self, request, *args, **kwargs):
        '''
        Allow passing of basis and time limitations
        '''
        # URL year takes precedence over a ?year= GET parameter.
        try:
            year = int(self.kwargs.get('year'))
        except (ValueError, TypeError):
            year = getIntFromGet(request, 'year')
        kwargs.update({
            'year': year,
            'basis': request.GET.get('basis'),
        })
        # Fall back to accrual-date basis for unknown/missing values.
        if kwargs.get('basis') not in EXPENSE_BASES.keys():
            kwargs['basis'] = 'accrualDate'
        return super().get(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = {}

        # Determine the period over which the statement should be produced.
        year = kwargs.get('year')
        eligible_years = list(set(
            [x.year for x in ExpenseItem.objects.values_list('accrualDate', flat=True).distinct()]
        ))
        eligible_years.sort(reverse=True)

        if year and year not in eligible_years:
            raise Http404(_("Invalid year."))

        context.update({
            'basis': kwargs.get('basis'),
            'basis_name': EXPENSE_BASES[kwargs.get('basis')],
            'year': year,
            'current_year': year or 'all',
            'eligible_years': eligible_years,
            'period_type': self.period_type,
            'base_view': self.base_view,
            'base_view_csv': self.base_view_csv,
        })

        page = self.kwargs.get('page') or self.request.GET.get('page') or 1
        context['statement'] = prepareFinancialStatement(year=year)
        paginator, page_obj, statementByPeriod, is_paginated = prepareStatementByPeriod(
            year=year, basis=context['basis'], type=self.period_type,
            page=page, paginate_by=self.get_paginate_by()
        )
        context.update({
            'paginator': paginator,
            'page_obj': page_obj,
            'is_paginated': is_paginated,
        })
        context['statement']['statementByPeriod'] = statementByPeriod

        return super().get_context_data(**context)

    def dispatch(self, request, *args, **kwargs):
        # URLconf passes as_csv=True on the CSV export route.
        if 'as_csv' in kwargs:
            self.as_csv = True
        return super().dispatch(request, *args, **kwargs)

    def render_to_response(self, context, **response_kwargs):
        if self.as_csv:
            return self.render_to_csv(context)
        return super().render_to_response(context, **response_kwargs)

    def render_to_csv(self, context):
        statement = context['statement']
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = \
            'attachment; filename="financialStatementBy{}.csv"'.format(
                str(self.period_type).title()
            )

        writer = csv.writer(response, csv.excel)
        response.write(u'\ufeff'.encode('utf8'))  # BOM (optional...Excel needs it to open UTF-8 file properly)

        header_list = [
            _('Month Name'),
            _('Revenues: Net'),
            _('Expenses: Instruction'),
            _('Expenses: Venue'),
            _('Expenses: Other'),
            _('Expenses: Total'),
            _('Registrations'),
            _('Net Profit'),
        ]
        writer.writerow(header_list)

        for x in statement['statementByPeriod']:
            this_row_data = [
                x['period_name'],
                x['revenues'],
                x['expenses']['instruction'],
                x['expenses']['venue'],
                x['expenses']['other'],
                x['expenses']['total'],
                x['registrations'],
                x['net_profit'],
            ]
            writer.writerow(this_row_data)
        return response
class FinancesByMonthView(FinancesByPeriodView):
    """Month-by-month financial statement (see FinancesByPeriodView)."""
    period_type = 'month'
    base_view = 'financesByMonth'
    base_view_csv = 'financesByMonthCSV'
class FinancesByDateView(FinancesByPeriodView):
    """Date-by-date financial statement (see FinancesByPeriodView)."""
    period_type = 'date'
    base_view = 'financesByDate'
    base_view_csv = 'financesByDateCSV'
class FinancialDetailView(FinancialContextMixin, PermissionRequiredMixin, TemplateView):
    """
    Detailed expense/revenue breakdown for a flexible period (year, month,
    day, explicit date range, or a single event), on a selectable basis.
    """
    permission_required = 'financial.view_finances_detail'
    template_name = 'financial/finances_detail.html'

    def get(self, request, *args, **kwargs):
        '''
        Pass any permissable GET data. URL parameters override GET parameters
        '''
        try:
            year = int(self.kwargs.get('year'))
        except (ValueError, TypeError):
            year = getIntFromGet(request, 'year')

        if self.kwargs.get('month'):
            try:
                month = int(self.kwargs.get('month'))
            except (ValueError, TypeError):
                # Accept a month name in the URL (e.g. "January").
                try:
                    month = list(month_name).index(self.kwargs.get('month').title())
                except (ValueError, TypeError):
                    month = None
        else:
            month = getIntFromGet(request, 'month')

        try:
            day = int(self.kwargs.get('day'))
        except (ValueError, TypeError):
            day = getIntFromGet(request, 'day')

        try:
            event_id = int(self.kwargs.get('event'))
        except (ValueError, TypeError):
            event_id = getIntFromGet(request, 'event')

        event = None
        if event_id:
            # An unknown event id is silently ignored rather than a 404.
            try:
                event = Event.objects.get(id=event_id)
            except ObjectDoesNotExist:
                pass

        kwargs.update({
            'year': year,
            'month': month,
            'day': day,
            'startDate': getDateTimeFromGet(request, 'startDate'),
            'endDate': getDateTimeFromGet(request, 'endDate'),
            'basis': request.GET.get('basis'),
            'event': event,
        })
        # Fall back to accrual-date basis for unknown/missing values.
        if kwargs.get('basis') not in EXPENSE_BASES.keys():
            kwargs['basis'] = 'accrualDate'

        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)

    def get_context_data(self, **kwargs):
        context = kwargs.copy()
        timeFilters = {}

        # Determine the period over which the statement should be produced.
        year = kwargs.get('year')
        month = kwargs.get('month')
        day = kwargs.get('day')
        startDate = kwargs.get('startDate')
        endDate = kwargs.get('endDate')
        event = kwargs.get('event')
        basis = kwargs.get('basis')

        context.update({
            'basis': basis,
            'basis_name': EXPENSE_BASES[basis],
            'rangeTitle': '',
        })

        if event:
            timeFilters['event'] = event
            context['rangeTitle'] += '%s ' % event.name
        if startDate:
            timeFilters['%s__gte' % basis] = startDate
            context['rangeType'] = 'Date Range'
            context['rangeTitle'] += str(_('From %s ' % startDate.strftime('%b. %d, %Y')))
        if endDate:
            timeFilters['%s__lt' % basis] = endDate
            context['rangeType'] = 'Date Range'
            context['rangeTitle'] += str(_('To %s ' % endDate.strftime('%b. %d, %Y')))

        # Without an explicit range, derive one from day/month/year (or
        # default to calendar year-to-date).
        if not startDate and not endDate:
            start = None
            delta = None
            if day and month and year:
                start = ensure_localtime(datetime(year, month, day))
                delta = relativedelta(days=1)
                context.update({
                    'rangeType': 'Day',
                    'rangeTitle': start.strftime('%B %d, %Y')
                })
            elif month and year:
                start = ensure_localtime(datetime(year, month, 1))
                delta = relativedelta(months=1)
                context.update({
                    'rangeType': 'Month',
                    'rangeTitle': start.strftime('%B %Y')
                })
            elif event:
                # Event alone: no time bounds, just the event filter above.
                context['rangeType'] = 'Event'
            elif year:
                start = ensure_localtime(datetime(year, 1, 1))
                delta = relativedelta(years=1)
                context.update({
                    'rangeType': 'Year',
                    'rangeTitle': start.strftime('%Y')
                })
            else:
                start = ensure_localtime(datetime(timezone.now().year, 1, 1))
                delta = relativedelta(years=1)
                context.update({
                    'rangeType': 'YTD',
                    'rangeTitle': _('Calendar Year To Date')
                })
            if start and delta:
                timeFilters['%s__gte' % basis] = start
                timeFilters['%s__lt' % basis] = start + delta

        context['startDate'] = timeFilters.get('%s__gte' % basis)
        context['endDate'] = timeFilters.get('%s__lt' % basis)

        # Revenues are booked on receipt basis, not payment/approval basis
        rev_timeFilters = timeFilters.copy()
        rev_basis = basis
        if basis in ['paymentDate', 'approvalDate']:
            rev_basis = 'receivedDate'
            # Rename the time-filter keys onto the receivedDate field.
            if rev_timeFilters.get('%s__gte' % basis):
                rev_timeFilters['receivedDate__gte'] = rev_timeFilters.get('%s__gte' % basis)
                rev_timeFilters.pop('%s__gte' % basis, None)
            if rev_timeFilters.get('%s__lt' % basis):
                rev_timeFilters['receivedDate__lt'] = rev_timeFilters.get('%s__lt' % basis)
                rev_timeFilters.pop('%s__lt' % basis, None)

        # Expense net adds fees; revenue net subtracts them.
        expenseItems = ExpenseItem.objects.filter(**timeFilters).annotate(
            net=F('total') + F('adjustments') + F('fees'),
            basisDate=Min(basis)
        ).order_by(basis)

        revenueItems = RevenueItem.objects.filter(**rev_timeFilters).annotate(
            net=F('total') + F('adjustments') - F('fees'),
            basisDate=Min(rev_basis)
        ).order_by(rev_basis)

        context['expenseItems'] = expenseItems
        context['revenueItems'] = revenueItems

        # Registration revenues, instruction and venue expenses
        # are broken out separately.
        context.update({
            'instructionExpenseItems': expenseItems.filter(
                category__in=[
                    getConstant('financial__classInstructionExpenseCat'),
                    getConstant('financial__assistantClassInstructionExpenseCat')
                ]
            ).order_by('payTo__name'),
            'venueExpenseItems': expenseItems.filter(
                category=getConstant('financial__venueRentalExpenseCat')
            ).order_by('payTo__name'),
            'otherExpenseItems': expenseItems.exclude(
                category__in=[
                    getConstant('financial__classInstructionExpenseCat'),
                    getConstant('financial__assistantClassInstructionExpenseCat'),
                    getConstant('financial__venueRentalExpenseCat')
                ]
            ).order_by('category'),
            'expenseCategoryTotals': ExpenseCategory.objects.filter(
                expenseitem__in=expenseItems
            ).annotate(
                category_total=Sum('expenseitem__total'),
                category_adjustments=Sum('expenseitem__adjustments'),
                category_fees=Sum('expenseitem__fees')
            ).annotate(
                category_net=F('category_total') + F('category_adjustments') + F('category_fees')
            ),
        })
        context.update({
            'instructionExpenseInstructorTotals': StaffMember.objects.filter(
                transactionparty__expenseitem__in=context['instructionExpenseItems']
            ).annotate(
                instructor_total=Sum('transactionparty__expenseitem__total'),
                instructor_adjustments=Sum('transactionparty__expenseitem__adjustments'),
                instructor_fees=Sum('transactionparty__expenseitem__fees')
            ).annotate(
                instructor_net=F('instructor_total') + F('instructor_adjustments') + F('instructor_fees')
            ),
            # NOTE(review): this re-annotates 'net' on a queryset derived
            # from one already annotated with 'net' above — verify Django
            # does not reject the duplicate alias on this code path.
            'instructionExpenseOtherTotal': context['instructionExpenseItems'].filter(
                payTo__staffMember__isnull=True
            ).annotate(
                net=F('total') + F('adjustments') + F('fees')
            ).aggregate(
                instructor_total=Sum('total'),
                instructor_adjustments=Sum('adjustments'),
                instructor_fees=Sum('fees'),
                instructor_net=Sum('net')
            ),
            'venueExpenseVenueTotals': Location.objects.filter(
                transactionparty__expenseitem__in=context['venueExpenseItems']
            ).annotate(
                location_total=Sum('transactionparty__expenseitem__total'),
                location_adjustments=Sum('transactionparty__expenseitem__adjustments'),
                location_fees=Sum('transactionparty__expenseitem__fees')
            ).annotate(
                location_net=F('location_total') + F('location_adjustments') + F('location_fees')
            ),
            # NOTE(review): aggregate alias 'location_net' shadows the
            # annotation of the same name added just above — confirm this
            # behaves as intended.
            'venueExpenseOtherTotal': context['venueExpenseItems'].filter(
                payTo__location__isnull=True
            ).annotate(
                location_net=F('total') + F('adjustments') + F('fees')
            ).aggregate(
                location_total=Sum('total'),
                location_adjustments=Sum('adjustments'),
                location_fees=Sum('fees'), location_net=Sum('net')
            ),
            'totalInstructionExpenses': sum([
                x.category_net or 0 for x in
                context['expenseCategoryTotals'].filter(
                    id__in=[
                        getConstant('financial__classInstructionExpenseCat').id,
                        getConstant('financial__assistantClassInstructionExpenseCat').id
                    ]
                )
            ]),
            'totalVenueExpenses': sum([
                x.category_net or 0 for x in
                context['expenseCategoryTotals'].filter(
                    id=getConstant('financial__venueRentalExpenseCat').id
                )
            ]),
            'totalOtherExpenses': sum([
                x.category_net or 0 for x in
                context['expenseCategoryTotals'].exclude(
                    id__in=[
                        getConstant('financial__classInstructionExpenseCat').id,
                        getConstant('financial__assistantClassInstructionExpenseCat').id,
                        getConstant('financial__venueRentalExpenseCat').id
                    ]
                )
            ]),
            'totalExpenses': sum([x.category_net or 0 for x in context['expenseCategoryTotals']]),
        })
        context.update({
            'registrationRevenueItems': revenueItems.filter(
                category=getConstant('financial__registrationsRevenueCat')
            ).order_by('-event__startTime', 'event__uuid'),
            'otherRevenueItems': revenueItems.exclude(
                category=getConstant('financial__registrationsRevenueCat')
            ).order_by('category'),
            'revenueCategoryTotals': RevenueCategory.objects.filter(
                revenueitem__in=revenueItems
            ).annotate(
                category_total=Sum('revenueitem__total'),
                category_adjustments=Sum('revenueitem__adjustments'),
                category_fees=Sum('revenueitem__fees')
            ).annotate(
                category_net=F('category_total') + F('category_adjustments') - F('category_fees')
            ),
        })
        context.update({
            'registrationRevenueEventTotals': Event.objects.filter(
                revenueitem__in=context['registrationRevenueItems']
            ).annotate(
                event_total=Sum('revenueitem__total'),
                event_adjustments=Sum('revenueitem__adjustments'),
                event_fees=Sum('revenueitem__fees')
            ).annotate(
                event_net=F('event_total') + F('event_adjustments') - F('event_fees')
            ),
            # NOTE(review): same alias-shadowing pattern as above
            # ('event_net' annotated then aggregated) — confirm behavior.
            'registrationRevenueOtherTotal': context['registrationRevenueItems'].filter(
                event__isnull=True
            ).annotate(
                event_net=F('total') + F('adjustments') - F('fees')
            ).aggregate(
                event_total=Sum('total'),
                event_adjustments=Sum('adjustments'),
                event_fees=Sum('fees'),
                event_net=Sum('net')
            ),
            'totalRegistrationRevenues': sum([
                x.category_net or 0 for x in
                context['revenueCategoryTotals'].filter(
                    id=getConstant('financial__registrationsRevenueCat').id
                )
            ]),
            'totalOtherRevenues': sum([
                x.category_net or 0 for x in
                context['revenueCategoryTotals'].exclude(
                    id=getConstant('financial__registrationsRevenueCat').id
                )
            ]),
            'totalRevenues': sum([x.category_net or 0 for x in context['revenueCategoryTotals']]),
        })
        context.update({
            'netProfit': context['totalRevenues'] - context['totalExpenses'],
        })
        return super().get_context_data(**context)
class CompensationActionView(
    SuccessMessageMixin, AdminSuccessURLMixin, PermissionRequiredMixin,
    FinancialContextMixin, FormView
):
    '''
    Base class with repeated logic for update and replace actions.
    '''
    permission_required = 'core.change_staffmember'

    def dispatch(self, request, *args, **kwargs):
        """Validate the ``ct`` and ``ids`` GET parameters before proceeding.

        ``ct`` must be the ContentType id of StaffMember (or a subclass),
        and ``ids`` a comma-separated list of integer primary keys.
        """
        ids = request.GET.get('ids')
        ct = getIntFromGet(request, 'ct')

        try:
            contentType = ContentType.objects.get(id=ct)
            self.objectClass = contentType.model_class()
        except (ValueError, ObjectDoesNotExist):
            return HttpResponseBadRequest(_('Invalid content type passed.'))

        # This view only deals with StaffMember
        if not isinstance(self.objectClass(), StaffMember):
            return HttpResponseBadRequest(_('Invalid content type passed.'))

        try:
            # BUGFIX: a missing ``ids`` parameter previously raised an
            # uncaught AttributeError on ids.split(); catch it and return
            # a 400. Splitting on ',' (int() tolerates surrounding
            # whitespace) also accepts both '1,2' and '1, 2' forms.
            self.queryset = self.objectClass.objects.filter(
                id__in=[int(x) for x in ids.split(',')]
            )
        except (ValueError, AttributeError):
            return HttpResponseBadRequest(_('Invalid ids passed'))
        return super().dispatch(request, *args, **kwargs)

    def get_form_kwargs(self, **kwargs):
        ''' pass the list of staff members along to the form '''
        kwargs = super().get_form_kwargs(**kwargs)
        kwargs['staffmembers'] = self.queryset
        return kwargs

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            'staffmembers': self.queryset,
            'rateRuleValues': dict(RepeatedExpenseRule.RateRuleChoices.choices),
        })
        return context
class CompensationRuleUpdateView(CompensationActionView):
    '''
    Admin action view for bulk updating the compensation rules of the
    staff members selected in CompensationActionView.dispatch().
    '''
    template_name = 'financial/update_staff_compensation_rules.html'
    form_class = CompensationRuleUpdateForm
    success_message = _('Staff member compensation rules updated successfully.')

    def form_valid(self, form):
        '''
        Apply the submitted compensation rule to every selected staff
        member, creating or replacing the rule for the chosen category.
        '''
        # Remove the category key from the cleaned data; everything that
        # remains becomes the defaults for update_or_create().
        selected_category = form.cleaned_data.pop('category', None)
        rule_defaults = form.cleaned_data

        for member in self.queryset:
            member.expenserules.update_or_create(
                category=selected_category,
                defaults=rule_defaults,
            )

        return super().form_valid(form)
class CompensationRuleResetView(CompensationActionView):
    '''
    Admin action view for bulk deleting custom staff member compensation
    information and/or resetting it to category defaults.
    '''
    template_name = 'financial/reset_staff_compensation_rules.html'
    form_class = CompensationRuleResetForm
    success_message = _('Staff member compensation rules reset successfully.')

    def form_valid(self, form):
        '''
        Either delete the custom wage info for the checked categories, or
        overwrite it with each category's default, per ``resetHow``.
        '''
        resetHow = form.cleaned_data.get('resetHow')

        # Checked category checkboxes are named 'category_<id>'; collect
        # the numeric ids of the ones that were checked.
        cat_numbers = []
        for field_name, checked in form.cleaned_data.items():
            if checked and 'category_' in field_name:
                cat_numbers.append(int(field_name.split('_')[1]))

        if resetHow == 'DELETE':
            # Remove the custom wage info entirely for these categories.
            StaffMemberWageInfo.objects.filter(
                staffMember__in=self.queryset, category__in=cat_numbers
            ).delete()
        elif resetHow == 'COPY':
            # Replace each member's rule with the category default; only
            # categories that actually have a default are considered.
            cats = EventStaffCategory.objects.filter(
                id__in=cat_numbers, defaultwage__isnull=False
            )
            for this_cat in cats:
                default_data = model_to_dict(
                    this_cat.defaultwage,
                    exclude=('category', 'id', 'repeatedexpenserule_ptr', 'lastRun')
                )
                for member in self.queryset:
                    member.expenserules.update_or_create(
                        category=this_cat,
                        defaults=default_data,
                    )

        return super().form_valid(form)
class ExpenseRuleGenerationView(AdminSuccessURLMixin, PermissionRequiredMixin, FormView):
    '''
    Allow an administrator to manually run the repeated expense rules,
    generating expense items (venue, staff, generic) and revenue items
    for whichever rules are checked on the form.
    '''
    template_name = 'financial/expense_generation.html'
    form_class = ExpenseRuleGenerationForm
    permission_required = 'financial.can_generate_repeated_expenses'

    def get_context_data(self, **kwargs):
        '''
        Group the form's field names by rule type (staff / venue /
        generic) so the template can lay out the checkboxes in sections.
        '''
        context = super().get_context_data(**kwargs)
        fields = getattr(context.get('form', {}), 'fields', OrderedDict())
        context.update({
            'form_title': _('Generate rule-based financial items'),
            'form_description': _(
                'This form is used to generate expense items and revenue items ' +
                'based on pre-set repeated expense rules. Please check the boxes ' +
                'for the rules that you wish to apply. Depending on your site ' +
                'settings, regular automatic generation of these financial items ' +
                'may already be occurring. Using this form should not lead duplicate ' +
                'items to be generated under these rules.'
            ),
            # 'staff' alone is the select-all toggle, not a rule field.
            'staff_keys': [
                key for key in fields.keys()
                if key.startswith('staff') and key != 'staff'
            ],
            'venue_keys': [
                key for key in fields.keys()
                if key.startswith('location') or key.startswith('room')
            ],
            # 'generic' alone is the select-all toggle, not a rule field.
            'generic_keys': [
                key for key in fields.keys()
                if key.startswith('generic') and key != 'generic'
            ],
        })
        return context

    def form_valid(self, form):
        '''
        Run the checked rules, tally how many items each one created, and
        report the total in a success message.
        Returns a 400 response if any rule field name fails to parse.
        '''
        try:
            # Field names encode the rule id as their last '_' component.
            generic_rules = RepeatedExpenseRule.objects.filter(id__in=[
                int(key.split('_')[-1]) for key, value in form.cleaned_data.items() if key.startswith('rule_') and value
            ]).order_by('id')
            location_rules = RepeatedExpenseRule.objects.filter(id__in=[
                int(key.split('_')[-1]) for key, value in form.cleaned_data.items() if (
                    key.startswith('locationrule_') or key.startswith('roomrule_')
                ) and value
            ]).order_by('id')
            staff_rules = RepeatedExpenseRule.objects.filter(id__in=[
                int(key.split('_')[-1]) for key, value in form.cleaned_data.items() if (
                    key.startswith('staffdefaultrule_') or key.startswith('staffmemberrule_')
                ) and value
            ]).order_by('id')
        except ValueError:
            return HttpResponseBadRequest(_('Invalid rules provided.'))

        response_items = [
            {
                'name': x.ruleName,
                'id': x.id,
                'type': _('Venue rental'),
                'created': createExpenseItemsForVenueRental(rule=x)
            } for x in location_rules
        ]
        response_items += [
            {
                'name': x.ruleName,
                'id': x.id,
                'type': _('Staff expenses'),
                'created': createExpenseItemsForEvents(rule=x)
            } for x in staff_rules
        ]
        response_items += [
            {
                'name': x.ruleName,
                'id': x.id,
                'type': _('Other expenses'),
                'created': createGenericExpenseItems(rule=x)
            } for x in generic_rules
        ]
        if form.cleaned_data.get('registrations'):
            response_items += [{
                'name': _('Revenue items for registrations'),
                'type': _('Revenue items for registrations'),
                'created': createRevenueItemsForRegistrations()
            }, ]

        # Translate the template first, then substitute: the old code
        # formatted before calling gettext(), so the catalog lookup used
        # the substituted string and could never match a translation.
        success_message = gettext(
            'Successfully created {count} financial items.'
        ).format(
            count=sum([x.get('created', 0) or 0 for x in response_items])
        )
        messages.success(self.request, success_message)
        return HttpResponseRedirect(self.get_success_url())
class AllExpensesViewCSV(PermissionRequiredMixin, View):
    '''
    Export every ExpenseItem as a CSV download, most recent first.
    '''
    permission_required = 'financial.export_financial_data'

    def get(self, request, *args, **kwargs):
        '''
        Handle via get() rather than by overriding dispatch(): the old
        dispatch() override never called super(), which shadowed
        PermissionRequiredMixin.dispatch() and silently bypassed the
        permission_required check.
        '''
        all_expenses = ExpenseItem.objects.order_by('-paid', '-approved', '-submissionDate')
        return getExpenseItemsCSV(all_expenses, scope='all')
class AllRevenuesViewCSV(PermissionRequiredMixin, View):
    '''
    Export every RevenueItem as a CSV download, most recent first.
    '''
    permission_required = 'financial.export_financial_data'

    def get(self, request, *args, **kwargs):
        '''
        Handle via get() rather than by overriding dispatch(): the old
        dispatch() override never called super(), which shadowed
        PermissionRequiredMixin.dispatch() and silently bypassed the
        permission_required check.
        '''
        all_revenues = RevenueItem.objects.order_by('-submissionDate')
        return getRevenueItemsCSV(all_revenues)
# NOTE(review): removed non-Python scraping residue that was appended here
# ("Subsets and Splits" dataset-viewer boilerplate); it broke the file's syntax.