gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nsip6(base_resource) :
    """ Configuration for ip6 resource.

    Maps the NITRO ``nsip6`` REST resource: an IPv6 address owned by the
    NetScaler appliance. Writable options are exposed through the properties
    below; the trailing group of attributes is read-only state reported by
    the appliance.
    """
    def __init__(self) :
        # --- configurable attributes (set via the writable properties) ---
        self._ipv6address = ""
        self._scope = ""
        self._type = ""
        self._vlan = 0
        self._nd = ""
        self._icmp = ""
        self._vserver = ""
        self._telnet = ""
        self._ftp = ""
        self._gui = ""
        self._ssh = ""
        self._snmp = ""
        self._mgmtaccess = ""
        self._restrictaccess = ""
        self._dynamicrouting = ""
        self._hostroute = ""
        self._ip6hostrtgw = ""
        self._metric = 0
        self._vserverrhilevel = ""
        self._ospf6lsatype = ""
        self._ospfarea = 0
        self._state = ""
        self._map = ""
        self._ownernode = 0
        self._td = 0
        # --- read-only attributes reported by the appliance ---
        self._iptype = []
        self._curstate = ""
        self._viprtadv2bsd = False
        self._vipvsercount = 0
        self._vipvserdowncount = 0
        self._systemtype = ""
        # triple-underscore count slot populated by count()/count_filtered()
        self.___count = 0
@property
def ipv6address(self):
    """IPv6 address to create on the NetScaler appliance.<br/>Minimum length = 1."""
    return self._ipv6address

@ipv6address.setter
def ipv6address(self, value):
    # Plain assignment; validation is performed by the appliance.
    self._ipv6address = value

@property
def scope(self):
    """Scope of the IPv6 address to be created. Cannot be changed after the IP address is created.<br/>Default value: global<br/>Possible values = global, link-local."""
    return self._scope

@scope.setter
def scope(self, value):
    self._scope = value

@property
def type(self):
    """Type of IP address to be created on the NetScaler appliance. Cannot be changed after the IP address is created.<br/>Default value: SNIP<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP."""
    return self._type

@type.setter
def type(self, value):
    self._type = value

@property
def vlan(self):
    """The VLAN number.<br/>Default value: 0<br/>Maximum length = 4094."""
    return self._vlan

@vlan.setter
def vlan(self, value):
    self._vlan = value

@property
def nd(self):
    """Respond to Neighbor Discovery (ND) requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._nd

@nd.setter
def nd(self, value):
    self._nd = value

@property
def icmp(self):
    """Respond to ICMP requests for this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._icmp

@icmp.setter
def icmp(self, value):
    self._icmp = value
@property
def vserver(self):
    """Enable or disable the state of all the virtual servers associated with this VIP6 address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._vserver

@vserver.setter
def vserver(self, value):
    self._vserver = value

@property
def telnet(self):
    """Allow Telnet access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._telnet

@telnet.setter
def telnet(self, value):
    self._telnet = value

@property
def ftp(self):
    """Allow File Transfer Protocol (FTP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._ftp

@ftp.setter
def ftp(self, value):
    self._ftp = value

@property
def gui(self):
    """Allow graphical user interface (GUI) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, SECUREONLY, DISABLED."""
    return self._gui

@gui.setter
def gui(self, value):
    self._gui = value

@property
def ssh(self):
    """Allow secure Shell (SSH) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._ssh

@ssh.setter
def ssh(self, value):
    self._ssh = value

@property
def snmp(self):
    """Allow Simple Network Management Protocol (SNMP) access to this IP address.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._snmp

@snmp.setter
def snmp(self, value):
    self._snmp = value

@property
def mgmtaccess(self):
    """Allow access to management applications on this IP address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._mgmtaccess

@mgmtaccess.setter
def mgmtaccess(self, value):
    self._mgmtaccess = value
@property
def restrictaccess(self):
    """Block access to nonmanagement applications on this IP address. This option is applicable for MIP6s, SNIP6s, and NSIP6s, and is disabled by default. Nonmanagement applications can run on the underlying NetScaler Free BSD operating system.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._restrictaccess

@restrictaccess.setter
def restrictaccess(self, value):
    self._restrictaccess = value

@property
def dynamicrouting(self):
    """Allow dynamic routing on this IP address. Specific to Subnet IPv6 (SNIP6) address.<br/>Default value: DISABLED<br/>Possible values = ENABLED, DISABLED."""
    return self._dynamicrouting

@dynamicrouting.setter
def dynamicrouting(self, value):
    self._dynamicrouting = value

@property
def hostroute(self):
    """Advertise a route for the VIP6 address by using the dynamic routing protocols running on the NetScaler appliance.<br/>Possible values = ENABLED, DISABLED."""
    return self._hostroute

@hostroute.setter
def hostroute(self, value):
    self._hostroute = value

@property
def ip6hostrtgw(self):
    """IPv6 address of the gateway for the route. If Gateway is not set, VIP uses :: as the gateway.<br/>Default value: 0."""
    return self._ip6hostrtgw

@ip6hostrtgw.setter
def ip6hostrtgw(self, value):
    self._ip6hostrtgw = value

@property
def metric(self):
    """Integer value to add to or subtract from the cost of the route advertised for the VIP6 address.<br/>Minimum length = -16777215."""
    return self._metric

@metric.setter
def metric(self, value):
    self._metric = value
@property
def vserverrhilevel(self):
    """Advertise or do not advertise the route for the Virtual IP (VIP6) address on the basis of the state of the virtual servers associated with that VIP6.
    * NONE - Advertise the route for the VIP6 address, irrespective of the state of the virtual servers associated with the address.
    * ONE VSERVER - Advertise the route for the VIP6 address if at least one of the associated virtual servers is in UP state.
    * ALL VSERVER - Advertise the route for the VIP6 address if all of the associated virtual servers are in UP state.
    * VSVR_CNTRLD - Advertise the route for the VIP address according to the RHIstate (RHI STATE) parameter setting on all the associated virtual servers of the VIP address along with their states:
      PASSIVE on all vservers -> always advertise; ACTIVE on all -> advertise if at least one vserver is UP; mixed -> advertise if at least one ACTIVE vserver is UP.
    <br/>Default value: ONE_VSERVER<br/>Possible values = ONE_VSERVER, ALL_VSERVERS, NONE, VSVR_CNTRLD."""
    return self._vserverrhilevel

@vserverrhilevel.setter
def vserverrhilevel(self, value):
    self._vserverrhilevel = value

@property
def ospf6lsatype(self):
    """Type of LSAs to be used by the IPv6 OSPF protocol, running on the NetScaler appliance, for advertising the route for the VIP6 address.<br/>Default value: EXTERNAL<br/>Possible values = INTRA_AREA, EXTERNAL."""
    return self._ospf6lsatype

@ospf6lsatype.setter
def ospf6lsatype(self, value):
    self._ospf6lsatype = value

@property
def ospfarea(self):
    """ID of the area in which the Intra-Area-Prefix LSAs are to be advertised for the VIP6 address by the IPv6 OSPF protocol running on the NetScaler appliance. When ospfArea is not set, VIP6 is advertised on all areas.<br/>Default value: -1<br/>Maximum length = 4294967294LU."""
    return self._ospfarea

@ospfarea.setter
def ospfarea(self, value):
    self._ospfarea = value

@property
def state(self):
    """Enable or disable the IP address.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED."""
    return self._state

@state.setter
def state(self, value):
    self._state = value
@property
def map(self):
    """Mapped IPV4 address for the IPV6 address."""
    return self._map

@map.setter
def map(self, value):
    self._map = value

@property
def ownernode(self):
    """ID of the cluster node for which you are adding the IP address. Must be used if you want the IP address to be active only on the specific node. Can be configured only through the cluster IP address. Cannot be changed after the IP address is created.<br/>Default value: 255."""
    return self._ownernode

@ownernode.setter
def ownernode(self, value):
    self._ownernode = value

@property
def td(self):
    """Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0.<br/>Maximum length = 4094."""
    return self._td

@td.setter
def td(self, value):
    self._td = value
# Read-only attributes: the appliance reports these; there are no setters.

@property
def iptype(self):
    """The type of the IPv6 address.<br/>Possible values = NSIP, VIP, SNIP, GSLBsiteIP, ADNSsvcIP, CLIP."""
    return self._iptype

@property
def curstate(self):
    """Current state of this IP.<br/>Default value: ENABLED<br/>Possible values = DISABLED, ENABLED."""
    return self._curstate

@property
def viprtadv2bsd(self):
    """Whether this route is advertised to FreeBSD."""
    return self._viprtadv2bsd

@property
def vipvsercount(self):
    """Number of vservers bound to this VIP."""
    return self._vipvsercount

@property
def vipvserdowncount(self):
    """Number of vservers bound to this VIP, which are down."""
    return self._vipvserdowncount

@property
def systemtype(self):
    """The type of the System. Possible Values: Standalone, HA, Cluster. Used for display purpose.<br/>Possible values = Stand-alone, HA, Cluster."""
    return self._systemtype
def _get_nitro_response(self, service, response) :
    """ converts nitro response into object and returns the object array in case of get request.

    :param service: the nitro_service carrying the session and payload formatter.
    :param response: raw response text from the appliance.
    :returns: the list of nsip6 objects from the parsed response envelope.
    :raises nitro_exception: when the appliance reports a non-zero error code.
    """
    try :
        result = service.payload_formatter.string_to_resource(nsip6_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            # errorcode 444: session expired on the appliance -- drop the cached session
            if (result.errorcode == 444) :
                service.clear_session(self)
            if result.severity :
                # only severity ERROR is fatal; warnings fall through and return the payload
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                # no severity supplied: treat any non-zero errorcode as fatal
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.nsip6
    except Exception as e :
        raise e
def _get_object_name(self) :
    """Return the identifier of this resource (its ipv6address), or None when unset."""
    try :
        # the ipv6address is the NITRO object key for nsip6
        return str(self.ipv6address) if self.ipv6address else None
    except Exception as e :
        raise e
@classmethod
def add(cls, client, resource) :
    """ Use this API to add nsip6.

    :param client: nitro_service session to the appliance.
    :param resource: a single nsip6 object or a list of them.
    :returns: the appliance response for a single add, or the bulk-request
        result for a list.
    """
    # Writable attributes copied onto the request object, in the same order
    # the generated code assigned them (property setters, so order is kept).
    _ADD_ATTRS = (
        "ipv6address", "scope", "type", "vlan", "nd", "icmp", "vserver",
        "telnet", "ftp", "gui", "ssh", "snmp", "mgmtaccess", "restrictaccess",
        "dynamicrouting", "hostroute", "ip6hostrtgw", "metric",
        "vserverrhilevel", "ospf6lsatype", "ospfarea", "state", "map",
        "ownernode", "td",
    )
    try :
        if type(resource) is not list :
            addresource = nsip6()
            for attr in _ADD_ATTRS :
                setattr(addresource, attr, getattr(resource, attr))
            return addresource.add_resource(client)
        else :
            if (resource and len(resource) > 0) :
                addresources = [ nsip6() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    for attr in _ADD_ATTRS :
                        setattr(addresources[i], attr, getattr(resource[i], attr))
                result = cls.add_bulk_request(client, addresources)
            # NOTE(review): as in the original, an empty list leaves `result`
            # unbound and raises UnboundLocalError -- preserved for parity.
            return result
    except Exception as e :
        raise e
@classmethod
def delete(cls, client, resource) :
    """ Use this API to delete nsip6.

    :param client: nitro_service session to the appliance.
    :param resource: an ipv6address string, an nsip6 object, or a list of either.
    """
    try :
        if type(resource) is not list :
            deleteresource = nsip6()
            if type(resource) != type(deleteresource):
                # caller passed the bare ipv6address key
                deleteresource.ipv6address = resource
            else :
                # caller passed a full nsip6 object; td qualifies the traffic domain
                deleteresource.ipv6address = resource.ipv6address
                deleteresource.td = resource.td
            return deleteresource.delete_resource(client)
        else :
            if type(resource[0]) != cls :
                # list of ipv6address strings
                if (resource and len(resource) > 0) :
                    deleteresources = [ nsip6() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].ipv6address = resource[i]
            else :
                # list of nsip6 objects
                if (resource and len(resource) > 0) :
                    deleteresources = [ nsip6() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].ipv6address = resource[i].ipv6address
                        deleteresources[i].td = resource[i].td
            # NOTE(review): an empty list leaves deleteresources unbound (NameError)
            result = cls.delete_bulk_request(client, deleteresources)
            return result
    except Exception as e :
        raise e
@classmethod
def update(cls, client, resource) :
    """ Use this API to update nsip6.

    :param client: nitro_service session to the appliance.
    :param resource: a single nsip6 object or a list of them.
    :returns: the appliance response for a single update, or the bulk-request
        result for a list.
    """
    # Updatable attributes, in the same order the generated code assigned
    # them (property setters, so assignment order is preserved).
    _UPDATE_ATTRS = (
        "ipv6address", "td", "nd", "icmp", "vserver", "telnet", "ftp", "gui",
        "ssh", "snmp", "mgmtaccess", "restrictaccess", "state", "map",
        "dynamicrouting", "hostroute", "ip6hostrtgw", "metric",
        "vserverrhilevel", "ospf6lsatype", "ospfarea",
    )
    try :
        if type(resource) is not list :
            updateresource = nsip6()
            for attr in _UPDATE_ATTRS :
                setattr(updateresource, attr, getattr(resource, attr))
            return updateresource.update_resource(client)
        else :
            if (resource and len(resource) > 0) :
                updateresources = [ nsip6() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    for attr in _UPDATE_ATTRS :
                        setattr(updateresources[i], attr, getattr(resource[i], attr))
                result = cls.update_bulk_request(client, updateresources)
            # NOTE(review): as in the original, an empty list leaves `result`
            # unbound and raises UnboundLocalError -- preserved for parity.
            return result
    except Exception as e :
        raise e
@classmethod
def unset(cls, client, resource, args) :
    """ Use this API to unset the properties of nsip6 resource.
    Properties that need to be unset are specified in args array.

    :param client: nitro_service session to the appliance.
    :param resource: an ipv6address string, an nsip6 object, or a list of either.
    :param args: names of the properties to reset to their defaults.
    """
    try :
        if type(resource) is not list :
            unsetresource = nsip6()
            if type(resource) != type(unsetresource):
                # caller passed the bare ipv6address key
                unsetresource.ipv6address = resource
            else :
                unsetresource.ipv6address = resource.ipv6address
                unsetresource.td = resource.td
            return unsetresource.unset_resource(client, args)
        else :
            if type(resource[0]) != cls :
                # list of ipv6address strings
                if (resource and len(resource) > 0) :
                    unsetresources = [ nsip6() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].ipv6address = resource[i]
            else :
                # list of nsip6 objects
                if (resource and len(resource) > 0) :
                    unsetresources = [ nsip6() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].ipv6address = resource[i].ipv6address
                        unsetresources[i].td = resource[i].td
            # NOTE(review): an empty list leaves unsetresources unbound (NameError)
            result = cls.unset_bulk_request(client, unsetresources, args)
            return result
    except Exception as e :
        raise e
@classmethod
def get(cls, client, name="", option_="") :
    """ Use this API to fetch all the nsip6 resources that are configured on netscaler.

    :param client: nitro_service session to the appliance.
    :param name: empty for all resources, or an nsip6 object (or list of them)
        whose key fields select specific resources.
    :param option_: optional query options.
    """
    try :
        if not name :
            # no selector: fetch every configured nsip6
            obj = nsip6()
            response = obj.get_resources(client, option_)
        else :
            if type(name) == cls :
                # NOTE(review): the inner `is not list` check is redundant here,
                # since type(name) == cls already excludes lists.
                if type(name) is not list :
                    option_ = options()
                    option_.args = nitro_util.object_to_string_withoutquotes(name)
                    response = name.get_resource(client, option_)
                else :
                    if name and len(name) > 0 :
                        response = [nsip6() for _ in range(len(name))]
                        for i in range(len(name)) :
                            option_ = options()
                            option_.args = nitro_util.object_to_string_withoutquotes(name[i])
                            response[i] = name[i].get_resource(client, option_)
        # NOTE(review): if name is truthy but not of type cls, `response` is
        # never bound and this raises UnboundLocalError -- confirm intended.
        return response
    except Exception as e :
        raise e
@classmethod
def get_filtered(cls, client, filter_) :
    """ Use this API to fetch filtered set of nsip6 resources.
    filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        option_ = options()
        option_.filter = filter_
        # delegate the filtered fetch to a throwaway resource instance
        return nsip6().getfiltered(client, option_)
    except Exception as e :
        raise e
@classmethod
def count(cls, client) :
    """ Use this API to count the nsip6 resources configured on NetScaler.
    Returns 0 when the appliance reports no resources.
    """
    try :
        option_ = options()
        option_.count = True
        response = nsip6().get_resources(client, option_)
        # the count is carried in the triple-underscore slot of the first item
        return response[0].__dict__['___count'] if response else 0
    except Exception as e :
        raise e
@classmethod
def count_filtered(cls, client, filter_) :
    """ Use this API to count filtered the set of nsip6 resources.
    Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = nsip6().getfiltered(client, option_)
        # the count is carried in the triple-underscore slot of the first item
        return response[0].__dict__['___count'] if response else 0
    except Exception as e :
        raise e
# Enumerations of the legal string values for the corresponding nsip6
# parameters; use these constants instead of raw strings when setting them.
class Iptype:
    NSIP = "NSIP"
    VIP = "VIP"
    SNIP = "SNIP"
    GSLBsiteIP = "GSLBsiteIP"
    ADNSsvcIP = "ADNSsvcIP"
    CLIP = "CLIP"
class Ssh:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class State:
    DISABLED = "DISABLED"
    ENABLED = "ENABLED"
class Ospf6lsatype:
    INTRA_AREA = "INTRA_AREA"
    EXTERNAL = "EXTERNAL"
class Scope:
    GLOBAL = "global"
    link_local = "link-local"
class Nd:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Systemtype:
    Stand_alone = "Stand-alone"
    HA = "HA"
    Cluster = "Cluster"
class Gui:
    ENABLED = "ENABLED"
    SECUREONLY = "SECUREONLY"
    DISABLED = "DISABLED"
class Dynamicrouting:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Type:
    NSIP = "NSIP"
    VIP = "VIP"
    SNIP = "SNIP"
    GSLBsiteIP = "GSLBsiteIP"
    ADNSsvcIP = "ADNSsvcIP"
    CLIP = "CLIP"
class Mgmtaccess:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Hostroute:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Ftp:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Vserverrhilevel:
    ONE_VSERVER = "ONE_VSERVER"
    ALL_VSERVERS = "ALL_VSERVERS"
    NONE = "NONE"
    VSVR_CNTRLD = "VSVR_CNTRLD"
class Icmp:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Vserver:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Snmp:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Curstate:
    DISABLED = "DISABLED"
    ENABLED = "ENABLED"
class Restrictaccess:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class Telnet:
    ENABLED = "ENABLED"
    DISABLED = "DISABLED"
class nsip6_response(base_response) :
    """ NITRO response envelope for nsip6 requests.

    :param length: number of empty nsip6 placeholders to pre-allocate for
        the payload formatter to fill in.
    """
    def __init__(self, length=1) :
        # standard NITRO response fields
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # pre-sized payload list; the original assigned self.nsip6 twice
        # (first to [] and then to this list) -- the dead first assignment
        # has been removed.
        self.nsip6 = [nsip6() for _ in range(length)]
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: postgresql_privs
version_added: '1.2'
short_description: Grant or revoke privileges on PostgreSQL database objects
description:
- Grant or revoke privileges on PostgreSQL database objects.
- This module is basically a wrapper around most of the functionality of
PostgreSQL's GRANT and REVOKE statements with detection of changes
(GRANT/REVOKE I(privs) ON I(type) I(objs) TO/FROM I(roles)).
options:
database:
description:
- Name of database to connect to.
required: yes
type: str
aliases:
- db
- login_db
state:
description:
- If C(present), the specified privileges are granted, if C(absent) they are revoked.
type: str
default: present
choices: [ absent, present ]
privs:
description:
- Comma separated list of privileges to grant/revoke.
type: str
aliases:
- priv
type:
description:
- Type of database object to set privileges on.
    - The C(default_privs) choice is available starting at version 2.7.
    - The C(foreign_data_wrapper) and C(foreign_server) object types are available from Ansible version 2.8.
type: str
default: table
choices: [ database, default_privs, foreign_data_wrapper, foreign_server, function,
group, language, table, tablespace, schema, sequence ]
objs:
description:
- Comma separated list of database objects to set privileges on.
- If I(type) is C(table), C(partition table), C(sequence) or C(function),
      the special value C(ALL_IN_SCHEMA) can be provided instead to specify all
database objects of type I(type) in the schema specified via I(schema).
(This also works with PostgreSQL < 9.0.) (C(ALL_IN_SCHEMA) is available
for C(function) and C(partition table) from version 2.8)
- If I(type) is C(database), this parameter can be omitted, in which case
privileges are set for the database specified via I(database).
- 'If I(type) is I(function), colons (":") in object names will be
replaced with commas (needed to specify function signatures, see examples)'
type: str
aliases:
- obj
schema:
description:
- Schema that contains the database objects specified via I(objs).
- May only be provided if I(type) is C(table), C(sequence), C(function)
or C(default_privs). Defaults to C(public) in these cases.
type: str
roles:
description:
- Comma separated list of role (user/group) names to set permissions for.
- The special value C(PUBLIC) can be provided instead to set permissions
for the implicitly defined PUBLIC group.
type: str
required: yes
aliases:
- role
fail_on_role:
version_added: '2.8'
description:
- If C(yes), fail when target role (for whom privs need to be granted) does not exist.
Otherwise just warn and continue.
default: yes
type: bool
session_role:
version_added: '2.8'
description:
- Switch to session_role after connecting.
- The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though the session_role were the one that had logged in originally.
type: str
target_roles:
description:
- A list of existing role (user/group) names to set as the
default permissions for database objects subsequently created by them.
- Parameter I(target_roles) is only available with C(type=default_privs).
type: str
version_added: '2.8'
grant_option:
description:
- Whether C(role) may grant/revoke the specified privileges/group memberships to others.
- Set to C(no) to revoke GRANT OPTION, leave unspecified to make no changes.
- I(grant_option) only has an effect if I(state) is C(present).
type: bool
aliases:
- admin_option
host:
description:
- Database host address. If unspecified, connect via Unix socket.
type: str
aliases:
- login_host
port:
description:
- Database port to connect to.
type: int
default: 5432
aliases:
- login_port
unix_socket:
description:
- Path to a Unix domain socket for local connections.
type: str
aliases:
- login_unix_socket
login:
description:
- The username to authenticate with.
type: str
default: postgres
aliases:
- login_user
password:
description:
- The password to authenticate with.
type: str
aliases:
- login_password
ssl_mode:
description:
- Determines whether or with what priority a secure SSL TCP/IP connection will be negotiated with the server.
- See https://www.postgresql.org/docs/current/static/libpq-ssl.html for more information on the modes.
- Default of C(prefer) matches libpq default.
type: str
default: prefer
choices: [ allow, disable, prefer, require, verify-ca, verify-full ]
version_added: '2.3'
ca_cert:
description:
- Specifies the name of a file containing SSL certificate authority (CA) certificate(s).
- If the file exists, the server's certificate will be verified to be signed by one of these authorities.
version_added: '2.3'
type: str
aliases:
- ssl_rootcert
notes:
- Parameters that accept comma separated lists (I(privs), I(objs), I(roles))
have singular alias names (I(priv), I(obj), I(role)).
- To revoke only C(GRANT OPTION) for a specific object, set I(state) to
C(present) and I(grant_option) to C(no) (see examples).
- Note that when revoking privileges from a role R, this role may still have
access via privileges granted to any role R is a member of including C(PUBLIC).
- Note that when revoking privileges from a role R, you do so as the user
specified via I(login). If R has been granted the same privileges by
another user also, R can still access database objects via these privileges.
- When revoking privileges, C(RESTRICT) is assumed (see PostgreSQL docs).
seealso:
- module: postgresql_user
- module: postgresql_owner
- module: postgresql_membership
- name: PostgreSQL privileges
description: General information about PostgreSQL privileges.
link: https://www.postgresql.org/docs/current/ddl-priv.html
- name: PostgreSQL GRANT command reference
description: Complete reference of the PostgreSQL GRANT command documentation.
link: https://www.postgresql.org/docs/current/sql-grant.html
- name: PostgreSQL REVOKE command reference
description: Complete reference of the PostgreSQL REVOKE command documentation.
link: https://www.postgresql.org/docs/current/sql-revoke.html
extends_documentation_fragment:
- postgres
author:
- Bernhard Weitzhofer (@b6d)
'''
EXAMPLES = r'''
# On database "library":
# GRANT SELECT, INSERT, UPDATE ON TABLE public.books, public.authors
# TO librarian, reader WITH GRANT OPTION
- name: Grant privs to librarian and reader on database library
postgresql_privs:
database: library
state: present
privs: SELECT,INSERT,UPDATE
type: table
objs: books,authors
schema: public
roles: librarian,reader
grant_option: yes
- name: Same as above leveraging default values
postgresql_privs:
db: library
privs: SELECT,INSERT,UPDATE
objs: books,authors
roles: librarian,reader
grant_option: yes
# REVOKE GRANT OPTION FOR INSERT ON TABLE books FROM reader
# Note that role "reader" will be *granted* INSERT privilege itself if this
# isn't already the case (since state: present).
- name: Revoke privs from reader
postgresql_privs:
db: library
state: present
priv: INSERT
obj: books
role: reader
grant_option: no
# "public" is the default schema. This also works for PostgreSQL 8.x.
- name: REVOKE INSERT, UPDATE ON ALL TABLES IN SCHEMA public FROM reader
postgresql_privs:
db: library
state: absent
privs: INSERT,UPDATE
objs: ALL_IN_SCHEMA
role: reader
- name: GRANT ALL PRIVILEGES ON SCHEMA public, math TO librarian
postgresql_privs:
db: library
privs: ALL
type: schema
objs: public,math
role: librarian
# Note the separation of arguments with colons.
- name: GRANT ALL PRIVILEGES ON FUNCTION math.add(int, int) TO librarian, reader
postgresql_privs:
db: library
privs: ALL
type: function
obj: add(int:int)
schema: math
roles: librarian,reader
# Note that group role memberships apply cluster-wide and therefore are not
# restricted to database "library" here.
- name: GRANT librarian, reader TO alice, bob WITH ADMIN OPTION
postgresql_privs:
db: library
type: group
objs: librarian,reader
roles: alice,bob
admin_option: yes
# Note that here "db: postgres" specifies the database to connect to, not the
# database to grant privileges on (which is specified via the "objs" param)
- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
postgresql_privs:
db: postgres
privs: ALL
type: database
obj: library
role: librarian
# If objs is omitted for type "database", it defaults to the database
# to which the connection is established
- name: GRANT ALL PRIVILEGES ON DATABASE library TO librarian
postgresql_privs:
db: library
privs: ALL
type: database
role: librarian
# Available since version 2.7
# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
# ALL_DEFAULT works only with privs=ALL
# For specific
- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO librarian
postgresql_privs:
db: library
objs: ALL_DEFAULT
privs: ALL
type: default_privs
role: librarian
grant_option: yes
# Available since version 2.7
# Objs must be set, ALL_DEFAULT to TABLES/SEQUENCES/TYPES/FUNCTIONS
# ALL_DEFAULT works only with privs=ALL
# For specific
- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 1
postgresql_privs:
db: library
objs: TABLES,SEQUENCES
privs: SELECT
type: default_privs
role: reader
- name: ALTER DEFAULT PRIVILEGES ON DATABASE library TO reader, step 2
postgresql_privs:
db: library
objs: TYPES
privs: USAGE
type: default_privs
role: reader
# Available since version 2.8
- name: GRANT ALL PRIVILEGES ON FOREIGN DATA WRAPPER fdw TO reader
postgresql_privs:
db: test
objs: fdw
privs: ALL
type: foreign_data_wrapper
role: reader
# Available since version 2.8
- name: GRANT ALL PRIVILEGES ON FOREIGN SERVER fdw_server TO reader
postgresql_privs:
db: test
objs: fdw_server
privs: ALL
type: foreign_server
role: reader
# Available since version 2.8
# Grant 'execute' permissions on all functions in schema 'common' to role 'caller'
- name: GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA common TO caller
postgresql_privs:
type: function
state: present
privs: EXECUTE
roles: caller
objs: ALL_IN_SCHEMA
schema: common
# Available since version 2.8
# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library GRANT SELECT ON TABLES TO reader
# GRANT SELECT privileges for new TABLES objects created by librarian as
# default to the role reader.
# For specific
- name: ALTER privs
postgresql_privs:
db: library
schema: library
objs: TABLES
privs: SELECT
type: default_privs
role: reader
target_roles: librarian
# Available since version 2.8
# ALTER DEFAULT PRIVILEGES FOR ROLE librarian IN SCHEMA library REVOKE SELECT ON TABLES FROM reader
# REVOKE SELECT privileges for new TABLES objects created by librarian as
# default from the role reader.
# For specific
- name: ALTER privs
postgresql_privs:
db: library
state: absent
schema: library
objs: TABLES
privs: SELECT
type: default_privs
role: reader
target_roles: librarian
'''
RETURN = r'''
queries:
description: List of executed queries.
returned: always
type: list
sample: ['REVOKE GRANT OPTION FOR INSERT ON TABLE "books" FROM "reader";']
version_added: '2.8'
'''
import traceback
PSYCOPG2_IMP_ERR = None
try:
import psycopg2
import psycopg2.extensions
except ImportError:
PSYCOPG2_IMP_ERR = traceback.format_exc()
psycopg2 = None
# import module snippets
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.database import pg_quote_identifier
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
# Privilege keywords accepted by PostgreSQL GRANT/REVOKE; user input is
# validated against this set before any SQL is built.  The original literal
# listed 'USAGE' twice (harmless in a frozenset, but misleading).
VALID_PRIVS = frozenset(('SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE',
                         'REFERENCES', 'TRIGGER', 'CREATE', 'CONNECT',
                         'TEMPORARY', 'TEMP', 'EXECUTE', 'USAGE', 'ALL'))
# Privileges that are meaningful for each ALTER DEFAULT PRIVILEGES
# object class.
VALID_DEFAULT_OBJS = {'TABLES': ('ALL', 'SELECT', 'INSERT', 'UPDATE', 'DELETE', 'TRUNCATE', 'REFERENCES', 'TRIGGER'),
                      'SEQUENCES': ('ALL', 'SELECT', 'UPDATE', 'USAGE'),
                      'FUNCTIONS': ('ALL', 'EXECUTE'),
                      'TYPES': ('ALL', 'USAGE')}
# Every query actually executed is appended here and returned to the caller
# in the module result (see main()'s exit_json).
executed_queries = []
class Error(Exception):
    """Module-local exception for invalid input or missing database objects.

    Raised by Connection helpers (e.g. unknown schema or unsupported object
    type) and caught in main() to produce a clean failure message.
    """
def role_exists(module, cursor, rolname):
    """Return True if a role named *rolname* exists in the cluster.

    :param module: AnsibleModule, used only to fail on SQL errors.
    :param cursor: open psycopg2 cursor.
    :param rolname: role name to look up (untrusted user input).
    """
    # SECURITY FIX: the original interpolated rolname directly into the SQL
    # string; a name containing a quote could break out of the literal.
    # Use a bound parameter so the driver does the escaping.
    query = "SELECT 1 FROM pg_roles WHERE rolname = %s"
    try:
        cursor.execute(query, (rolname,))
        return cursor.rowcount > 0
    except Exception as e:
        module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
    # fail_json() exits the module; this is only reached if it is stubbed out.
    return False
# We don't have functools.partial in Python < 2.5
def partial(f, *args, **kwargs):
    """Partial function application.

    Returns a callable g such that g(*a, **kw) calls f with the positional
    arguments captured here prepended to *a*, and the keyword arguments
    captured here overridden by *kw*.
    """
    def g(*g_args, **g_kwargs):
        new_kwargs = kwargs.copy()
        new_kwargs.update(g_kwargs)
        # BUGFIX: the original passed **g_kwargs here, so new_kwargs was
        # computed and then ignored — keyword arguments supplied at
        # partial-application time were silently dropped.
        return f(*(args + g_args), **new_kwargs)
    g.f = f
    g.args = args
    g.kwargs = kwargs
    return g
class Connection(object):
    """Wrapper around a psycopg2 connection with some convenience methods."""
    def __init__(self, params, module):
        self.database = params.database
        self.module = module
        # To use default values, keyword arguments must be absent, so
        # check which values are empty and don't include in the **kw
        # dictionary
        params_map = {
            "host": "host",
            "login": "user",
            "password": "password",
            "port": "port",
            "database": "database",
            "ssl_mode": "sslmode",
            "ca_cert": "sslrootcert"
        }
        kw = dict((params_map[k], getattr(params, k)) for k in params_map
                  if getattr(params, k) != '' and getattr(params, k) is not None)
        # If a unix_socket is specified, incorporate it here.
        is_localhost = "host" not in kw or kw["host"] == "" or kw["host"] == "localhost"
        if is_localhost and params.unix_socket != "":
            kw["host"] = params.unix_socket
        sslrootcert = params.ca_cert
        # NOTE(review): this is a lexicographic string comparison, not a
        # real version compare; it happens to order published psycopg2 2.x
        # releases correctly but would misorder e.g. '2.10.0' — confirm
        # before relying on it for newer releases.
        if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
            raise ValueError('psycopg2 must be at least 2.4.3 in order to user the ca_cert parameter')
        self.connection = psycopg2.connect(**kw)
        self.cursor = self.connection.cursor()
    def commit(self):
        """Commit the current transaction."""
        self.connection.commit()
    def rollback(self):
        """Roll back the current transaction."""
        self.connection.rollback()
    @property
    def encoding(self):
        """Connection encoding in Python-compatible form"""
        return psycopg2.extensions.encodings[self.connection.encoding]
    # Methods for querying database objects
    # PostgreSQL < 9.0 doesn't support "ALL TABLES IN SCHEMA schema"-like
    # phrases in GRANT or REVOKE statements, therefore alternative methods are
    # provided here.
    def schema_exists(self, schema):
        """Return True if *schema* exists in the connected database."""
        query = """SELECT count(*)
                   FROM pg_catalog.pg_namespace WHERE nspname = %s"""
        self.cursor.execute(query, (schema,))
        return self.cursor.fetchone()[0] > 0
    def get_all_tables_in_schema(self, schema):
        """Return the names of all tables/views/matviews/partitioned
        tables (relkind r/v/m/p) in *schema*."""
        if not self.schema_exists(schema):
            raise Error('Schema "%s" does not exist.' % schema)
        query = """SELECT relname
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind in ('r', 'v', 'm', 'p')"""
        self.cursor.execute(query, (schema,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_all_sequences_in_schema(self, schema):
        """Return the names of all sequences (relkind 'S') in *schema*."""
        if not self.schema_exists(schema):
            raise Error('Schema "%s" does not exist.' % schema)
        query = """SELECT relname
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind = 'S'"""
        self.cursor.execute(query, (schema,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_all_functions_in_schema(self, schema):
        """Return all function signatures in *schema* as 'name(argtypes)'."""
        if not self.schema_exists(schema):
            raise Error('Schema "%s" does not exist.' % schema)
        query = """SELECT p.proname, oidvectortypes(p.proargtypes)
                    FROM pg_catalog.pg_proc p
                    JOIN pg_namespace n ON n.oid = p.pronamespace
                    WHERE nspname = %s"""
        self.cursor.execute(query, (schema,))
        return ["%s(%s)" % (t[0], t[1]) for t in self.cursor.fetchall()]
    # Methods for getting access control lists and group membership info
    # To determine whether anything has changed after granting/revoking
    # privileges, we compare the access control lists of the specified database
    # objects before and afterwards. Python's list/string comparison should
    # suffice for change detection, we should not actually have to parse ACLs.
    # The same should apply to group membership information.
    def get_table_acls(self, schema, tables):
        """Return relacl entries for *tables* in *schema*, ordered by name."""
        query = """SELECT relacl
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind in ('r','p','v','m') AND relname = ANY (%s)
                   ORDER BY relname"""
        self.cursor.execute(query, (schema, tables))
        return [t[0] for t in self.cursor.fetchall()]
    def get_sequence_acls(self, schema, sequences):
        """Return relacl entries for *sequences* in *schema*, ordered by name."""
        query = """SELECT relacl
                   FROM pg_catalog.pg_class c
                   JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
                   WHERE nspname = %s AND relkind = 'S' AND relname = ANY (%s)
                   ORDER BY relname"""
        self.cursor.execute(query, (schema, sequences))
        return [t[0] for t in self.cursor.fetchall()]
    def get_function_acls(self, schema, function_signatures):
        """Return proacl entries for the functions named in
        *function_signatures* (the argument lists are stripped off)."""
        funcnames = [f.split('(', 1)[0] for f in function_signatures]
        query = """SELECT proacl
                   FROM pg_catalog.pg_proc p
                   JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
                   WHERE nspname = %s AND proname = ANY (%s)
                   ORDER BY proname, proargtypes"""
        self.cursor.execute(query, (schema, funcnames))
        return [t[0] for t in self.cursor.fetchall()]
    def get_schema_acls(self, schemas):
        """Return nspacl entries for *schemas*, ordered by name."""
        query = """SELECT nspacl FROM pg_catalog.pg_namespace
                   WHERE nspname = ANY (%s) ORDER BY nspname"""
        self.cursor.execute(query, (schemas,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_language_acls(self, languages):
        """Return lanacl entries for *languages*, ordered by name."""
        query = """SELECT lanacl FROM pg_catalog.pg_language
                   WHERE lanname = ANY (%s) ORDER BY lanname"""
        self.cursor.execute(query, (languages,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_tablespace_acls(self, tablespaces):
        """Return spcacl entries for *tablespaces*, ordered by name."""
        query = """SELECT spcacl FROM pg_catalog.pg_tablespace
                   WHERE spcname = ANY (%s) ORDER BY spcname"""
        self.cursor.execute(query, (tablespaces,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_database_acls(self, databases):
        """Return datacl entries for *databases*, ordered by name."""
        query = """SELECT datacl FROM pg_catalog.pg_database
                   WHERE datname = ANY (%s) ORDER BY datname"""
        self.cursor.execute(query, (databases,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_group_memberships(self, groups):
        """Return (roleid, grantor, member, admin_option) rows for *groups*."""
        query = """SELECT roleid, grantor, member, admin_option
                   FROM pg_catalog.pg_auth_members am
                   JOIN pg_catalog.pg_roles r ON r.oid = am.roleid
                   WHERE r.rolname = ANY(%s)
                   ORDER BY roleid, grantor, member"""
        self.cursor.execute(query, (groups,))
        return self.cursor.fetchall()
    def get_default_privs(self, schema, *args):
        """Return default-privilege ACLs (defaclacl) for *schema*.

        Extra positional args are accepted (and ignored) so this method can
        be used interchangeably with the other get_* callbacks via partial().
        """
        query = """SELECT defaclacl
                   FROM pg_default_acl a
                   JOIN pg_namespace b ON a.defaclnamespace=b.oid
                   WHERE b.nspname = %s;"""
        self.cursor.execute(query, (schema,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_foreign_data_wrapper_acls(self, fdws):
        """Return fdwacl entries for foreign data wrappers *fdws*."""
        query = """SELECT fdwacl FROM pg_catalog.pg_foreign_data_wrapper
                   WHERE fdwname = ANY (%s) ORDER BY fdwname"""
        self.cursor.execute(query, (fdws,))
        return [t[0] for t in self.cursor.fetchall()]
    def get_foreign_server_acls(self, fs):
        """Return srvacl entries for foreign servers *fs*."""
        query = """SELECT srvacl FROM pg_catalog.pg_foreign_server
                   WHERE srvname = ANY (%s) ORDER BY srvname"""
        self.cursor.execute(query, (fs,))
        return [t[0] for t in self.cursor.fetchall()]
    # Manipulating privileges
    def manipulate_privs(self, obj_type, privs, objs, roles, target_roles,
                         state, grant_option, schema_qualifier=None, fail_on_role=True):
        """Manipulate database object privileges.

        :param obj_type: Type of database object to grant/revoke
                         privileges for.
        :param privs: Either a list of privileges to grant/revoke
                      or None if type is "group".
        :param objs: List of database objects to grant/revoke
                     privileges for.
        :param roles: Either a list of role names or "PUBLIC"
                      for the implicitly defined "PUBLIC" group
        :param target_roles: List of role names to grant/revoke
                             default privileges as.
        :param state: "present" to grant privileges, "absent" to revoke.
        :param grant_option: Only for state "present": If True, set
                             grant/admin option. If False, revoke it.
                             If None, don't change grant option.
        :param schema_qualifier: Some object types ("TABLE", "SEQUENCE",
                                 "FUNCTION") must be qualified by schema.
                                 Ignored for other Types.
        :return: True if the ACLs (or group memberships) changed.
        """
        # get_status: function to get current status
        if obj_type == 'table':
            get_status = partial(self.get_table_acls, schema_qualifier)
        elif obj_type == 'sequence':
            get_status = partial(self.get_sequence_acls, schema_qualifier)
        elif obj_type == 'function':
            get_status = partial(self.get_function_acls, schema_qualifier)
        elif obj_type == 'schema':
            get_status = self.get_schema_acls
        elif obj_type == 'language':
            get_status = self.get_language_acls
        elif obj_type == 'tablespace':
            get_status = self.get_tablespace_acls
        elif obj_type == 'database':
            get_status = self.get_database_acls
        elif obj_type == 'group':
            get_status = self.get_group_memberships
        elif obj_type == 'default_privs':
            get_status = partial(self.get_default_privs, schema_qualifier)
        elif obj_type == 'foreign_data_wrapper':
            get_status = self.get_foreign_data_wrapper_acls
        elif obj_type == 'foreign_server':
            get_status = self.get_foreign_server_acls
        else:
            raise Error('Unsupported database object type "%s".' % obj_type)
        # Return False (nothing has changed) if there are no objs to work on.
        if not objs:
            return False
        # obj_ids: quoted db object identifiers (sometimes schema-qualified)
        if obj_type == 'function':
            obj_ids = []
            for obj in objs:
                try:
                    f, args = obj.split('(', 1)
                except Exception:
                    raise Error('Illegal function signature: "%s".' % obj)
                # args still carries the signature's closing paren (split on
                # '(' only), so the format string below is intentionally
                # unbalanced.
                obj_ids.append('"%s"."%s"(%s' % (schema_qualifier, f, args))
        elif obj_type in ['table', 'sequence']:
            obj_ids = ['"%s"."%s"' % (schema_qualifier, o) for o in objs]
        else:
            obj_ids = ['"%s"' % o for o in objs]
        # set_what: SQL-fragment specifying what to set for the target roles:
        # Either group membership or privileges on objects of a certain type
        if obj_type == 'group':
            set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids)
        elif obj_type == 'default_privs':
            # We don't want privs to be quoted here
            set_what = ','.join(privs)
        else:
            # function types are already quoted above
            if obj_type != 'function':
                obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
            # Note: obj_type has been checked against a set of string literals
            # and privs was escaped when it was parsed
            # Note: Underscores are replaced with spaces to support multi-word obj_type
            set_what = '%s ON %s %s' % (','.join(privs), obj_type.replace('_', ' '),
                                        ','.join(obj_ids))
        # for_whom: SQL-fragment specifying for whom to set the above
        if roles == 'PUBLIC':
            for_whom = 'PUBLIC'
        else:
            for_whom = []
            for r in roles:
                if not role_exists(self.module, self.cursor, r):
                    if fail_on_role:
                        self.module.fail_json(msg="Role '%s' does not exist" % r.strip())
                    else:
                        self.module.warn("Role '%s' does not exist, pass it" % r.strip())
                else:
                    for_whom.append(pg_quote_identifier(r, 'role'))
            # All requested roles were missing (and fail_on_role is off):
            # nothing to do.
            if not for_whom:
                return False
            for_whom = ','.join(for_whom)
        # as_who: roles to ALTER DEFAULT PRIVILEGES "FOR ROLE ..." as.
        as_who = None
        if target_roles:
            as_who = ','.join(pg_quote_identifier(r, 'role') for r in target_roles)
        # Compare ACLs before and after executing the built query; a textual
        # difference is our change signal.
        status_before = get_status(objs)
        query = QueryBuilder(state) \
            .for_objtype(obj_type) \
            .with_grant_option(grant_option) \
            .for_whom(for_whom) \
            .as_who(as_who) \
            .for_schema(schema_qualifier) \
            .set_what(set_what) \
            .for_objs(objs) \
            .build()
        executed_queries.append(query)
        self.cursor.execute(query)
        status_after = get_status(objs)
        return status_before != status_after
class QueryBuilder(object):
    """Fluent builder for the GRANT/REVOKE/ALTER DEFAULT PRIVILEGES SQL.

    Configure the pieces via the chained for_*/with_*/set_* methods, then
    call build() to obtain the final newline-joined statement string.
    """
    def __init__(self, state):
        self._grant_option = None
        self._for_whom = None
        self._as_who = None
        self._set_what = None
        self._obj_type = None
        self._state = state
        self._schema = None
        self._objs = None
        self.query = []
    def for_objs(self, objs):
        """Set the list of object names (used by the default_privs forms)."""
        self._objs = objs
        return self
    def for_schema(self, schema):
        """Set the schema for ALTER DEFAULT PRIVILEGES statements."""
        self._schema = schema
        return self
    def with_grant_option(self, option):
        """Set the grant-option flag (True/False/None)."""
        self._grant_option = option
        return self
    def for_whom(self, who):
        """Set the quoted role list the privileges are granted to/revoked from."""
        self._for_whom = who
        return self
    def as_who(self, target_roles):
        """Set the quoted roles for 'FOR ROLE ...' default-privilege forms."""
        self._as_who = target_roles
        return self
    def set_what(self, what):
        """Set the pre-built 'privs ON type objects' SQL fragment."""
        self._set_what = what
        return self
    def for_objtype(self, objtype):
        """Set the database object type being manipulated."""
        self._obj_type = objtype
        return self
    def build(self):
        """Return the complete SQL text for the configured state."""
        # Any state other than 'present' produces the revoke form.  (The
        # original spelled this as a redundant elif 'absent'/else pair,
        # both calling build_absent().)
        if self._state == 'present':
            self.build_present()
        else:
            self.build_absent()
        return '\n'.join(self.query)
    def add_default_revoke(self):
        """Emit ALTER DEFAULT PRIVILEGES ... REVOKE ALL for every object
        class, so the following GRANT starts from a clean slate."""
        for obj in self._objs:
            if self._as_who:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(
                        self._as_who, self._schema, obj, self._for_whom))
            else:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(
                        self._schema, obj, self._for_whom))
    def add_grant_option(self):
        """Terminate the last statement and handle WITH GRANT/ADMIN OPTION.

        NOTE(review): a grant_option of None falls into the else branch as
        well, so leaving grant_option unspecified still emits the REVOKE
        ... OPTION statement — this contradicts the module documentation
        ('leave unspecified to make no changes').  Behavior is kept as-is
        here; confirm intent before changing it.
        """
        if self._grant_option:
            if self._obj_type == 'group':
                self.query[-1] += ' WITH ADMIN OPTION;'
            else:
                self.query[-1] += ' WITH GRANT OPTION;'
        else:
            self.query[-1] += ';'
            if self._obj_type == 'group':
                self.query.append('REVOKE ADMIN OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
            elif not self._obj_type == 'default_privs':
                self.query.append('REVOKE GRANT OPTION FOR {0} FROM {1};'.format(self._set_what, self._for_whom))
    def add_default_priv(self):
        """Emit the ALTER DEFAULT PRIVILEGES ... GRANT statements, plus a
        USAGE ON TYPES grant, each followed by grant-option handling."""
        for obj in self._objs:
            if self._as_who:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT {2} ON {3} TO {4}'.format(
                        self._as_who, self._schema, self._set_what, obj, self._for_whom))
            else:
                self.query.append(
                    'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT {1} ON {2} TO {3}'.format(
                        self._schema, self._set_what, obj, self._for_whom))
            self.add_grant_option()
        if self._as_who:
            self.query.append(
                'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} GRANT USAGE ON TYPES TO {2}'.format(
                    self._as_who, self._schema, self._for_whom))
        else:
            self.query.append(
                'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} GRANT USAGE ON TYPES TO {1}'.format(self._schema, self._for_whom))
        self.add_grant_option()
    def build_present(self):
        """Build the GRANT form (revoking defaults first for default_privs)."""
        if self._obj_type == 'default_privs':
            self.add_default_revoke()
            self.add_default_priv()
        else:
            self.query.append('GRANT {0} TO {1}'.format(self._set_what, self._for_whom))
            self.add_grant_option()
    def build_absent(self):
        """Build the REVOKE form."""
        if self._obj_type == 'default_privs':
            self.query = []
            for obj in ['TABLES', 'SEQUENCES', 'TYPES']:
                if self._as_who:
                    self.query.append(
                        'ALTER DEFAULT PRIVILEGES FOR ROLE {0} IN SCHEMA {1} REVOKE ALL ON {2} FROM {3};'.format(
                            self._as_who, self._schema, obj, self._for_whom))
                else:
                    self.query.append(
                        'ALTER DEFAULT PRIVILEGES IN SCHEMA {0} REVOKE ALL ON {1} FROM {2};'.format(
                            self._schema, obj, self._for_whom))
        else:
            self.query.append('REVOKE {0} FROM {1};'.format(self._set_what, self._for_whom))
def main():
    """Module entry point: parse parameters, connect, and apply privileges."""
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        database=dict(required=True, aliases=['db', 'login_db']),
        state=dict(default='present', choices=['present', 'absent']),
        privs=dict(required=False, aliases=['priv']),
        type=dict(default='table',
                  choices=['table',
                           'sequence',
                           'function',
                           'database',
                           'schema',
                           'language',
                           'tablespace',
                           'group',
                           'default_privs',
                           'foreign_data_wrapper',
                           'foreign_server']),
        objs=dict(required=False, aliases=['obj']),
        schema=dict(required=False),
        roles=dict(required=True, aliases=['role']),
        session_role=dict(required=False),
        target_roles=dict(required=False),
        grant_option=dict(required=False, type='bool',
                          aliases=['admin_option']),
        host=dict(default='', aliases=['login_host']),
        unix_socket=dict(default='', aliases=['login_unix_socket']),
        login=dict(default='postgres', aliases=['login_user']),
        password=dict(default='', aliases=['login_password'], no_log=True),
        fail_on_role=dict(type='bool', default=True),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    fail_on_role = module.params['fail_on_role']
    # Create type object as namespace for module params
    p = type('Params', (), module.params)
    # param "schema": default, allowed depends on param "type"
    if p.type in ['table', 'sequence', 'function', 'default_privs']:
        p.schema = p.schema or 'public'
    elif p.schema:
        module.fail_json(msg='Argument "schema" is not allowed '
                             'for type "%s".' % p.type)
    # param "objs": default, required depends on param "type"
    if p.type == 'database':
        p.objs = p.objs or p.database
    elif not p.objs:
        module.fail_json(msg='Argument "objs" is required '
                             'for type "%s".' % p.type)
    # param "privs": allowed, required depends on param "type"
    if p.type == 'group':
        if p.privs:
            module.fail_json(msg='Argument "privs" is not allowed '
                                 'for type "group".')
    elif not p.privs:
        module.fail_json(msg='Argument "privs" is required '
                             'for type "%s".' % p.type)
    # Connect to Database
    if not psycopg2:
        module.fail_json(msg=missing_required_lib('psycopg2'), exception=PSYCOPG2_IMP_ERR)
    try:
        conn = Connection(p, module)
    except psycopg2.Error as e:
        module.fail_json(msg='Could not connect to database: %s' % to_native(e), exception=traceback.format_exc())
    except TypeError as e:
        if 'sslrootcert' in e.args[0]:
            module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
        module.fail_json(msg="unable to connect to database: %s" % to_native(e), exception=traceback.format_exc())
    except ValueError as e:
        # We raise this when the psycopg library is too old
        module.fail_json(msg=to_native(e))
    if p.session_role:
        try:
            conn.cursor.execute('SET ROLE %s' % pg_quote_identifier(p.session_role, 'role'))
        except Exception as e:
            module.fail_json(msg="Could not switch to role %s: %s" % (p.session_role, to_native(e)), exception=traceback.format_exc())
    try:
        # privs
        if p.privs:
            privs = frozenset(pr.upper() for pr in p.privs.split(','))
            if not privs.issubset(VALID_PRIVS):
                module.fail_json(msg='Invalid privileges specified: %s' % privs.difference(VALID_PRIVS))
        else:
            privs = None
        # objs:
        if p.type == 'table' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_tables_in_schema(p.schema)
        elif p.type == 'sequence' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_sequences_in_schema(p.schema)
        elif p.type == 'function' and p.objs == 'ALL_IN_SCHEMA':
            objs = conn.get_all_functions_in_schema(p.schema)
        elif p.type == 'default_privs':
            if p.objs == 'ALL_DEFAULT':
                objs = frozenset(VALID_DEFAULT_OBJS.keys())
            else:
                objs = frozenset(obj.upper() for obj in p.objs.split(','))
                if not objs.issubset(VALID_DEFAULT_OBJS):
                    module.fail_json(
                        msg='Invalid Object set specified: %s' % objs.difference(VALID_DEFAULT_OBJS.keys()))
            # Again, do we have valid privs specified for object type:
            valid_objects_for_priv = frozenset(obj for obj in objs if privs.issubset(VALID_DEFAULT_OBJS[obj]))
            if not valid_objects_for_priv == objs:
                module.fail_json(
                    msg='Invalid priv specified. Valid object for priv: {0}. Objects: {1}'.format(
                        valid_objects_for_priv, objs))
        else:
            objs = p.objs.split(',')
            # function signatures are encoded using ':' to separate args
            if p.type == 'function':
                objs = [obj.replace(':', ',') for obj in objs]
        # roles
        if p.roles == 'PUBLIC':
            roles = 'PUBLIC'
        else:
            roles = p.roles.split(',')
            if len(roles) == 1 and not role_exists(module, conn.cursor, roles[0]):
                # BUGFIX: the original called module.exit_json(changed=False)
                # *before* this branch, so the documented fail_on_role
                # behavior (fail/warn on a missing role) was unreachable.
                # Honour fail_on_role first, then exit unchanged.
                if fail_on_role:
                    module.fail_json(msg="Role '%s' does not exist" % roles[0].strip())
                else:
                    module.warn("Role '%s' does not exist, nothing to do" % roles[0].strip())
                module.exit_json(changed=False)
        # check if target_roles is set with type: default_privs
        if p.target_roles and not p.type == 'default_privs':
            module.warn('"target_roles" will be ignored. '
                        'Argument "type: default_privs" is required for usage of "target_roles".')
        # target roles
        if p.target_roles:
            target_roles = p.target_roles.split(',')
        else:
            target_roles = None
        changed = conn.manipulate_privs(
            obj_type=p.type,
            privs=privs,
            objs=objs,
            roles=roles,
            target_roles=target_roles,
            state=p.state,
            grant_option=p.grant_option,
            schema_qualifier=p.schema,
            fail_on_role=fail_on_role,
        )
    except Error as e:
        conn.rollback()  # Leave the transaction in a clean state.
        # BUGFIX: Exception.message does not exist on Python 3.
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except psycopg2.Error as e:
        conn.rollback()
        # BUGFIX: use the exception itself, not the Python-2-only .message.
        module.fail_json(msg=to_native(e))
    if module.check_mode:
        conn.rollback()
    else:
        conn.commit()
    module.exit_json(changed=changed, queries=executed_queries)
if __name__ == '__main__':
    main()
| |
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.serialization import jsonutils
from nova import block_device
from nova import context
from nova import test
from nova.tests import fake_instance
from nova.tests import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.volume import cinder
from nova.volume import encryptors
class TestDriverBlockDevice(test.NoDBTestCase):
    """Tests for the nova.virt.block_device driver BDM wrapper classes.

    Each Driver*BlockDevice class transforms a database block device
    mapping (BDM) into the dict format consumed by virt drivers.  These
    tests verify the transformation (current and legacy formats), the
    save() proxying, and the attach/refresh code paths for the
    volume-backed device types.
    """

    # Maps a BDM flavour name to the driver transformation class under test.
    driver_classes = {
        'swap': driver_block_device.DriverSwapBlockDevice,
        'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
        'volume': driver_block_device.DriverVolumeBlockDevice,
        'snapshot': driver_block_device.DriverSnapshotBlockDevice,
        'image': driver_block_device.DriverImageBlockDevice,
        'blank': driver_block_device.DriverBlankBlockDevice
    }

    # For every flavour there are three fixtures: the DB-style BDM dict,
    # the expected driver-format dict, and the expected legacy-format dict.
    swap_bdm = block_device.BlockDeviceDict(
        {'id': 1, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdb1',
         'source_type': 'blank',
         'destination_type': 'local',
         'delete_on_termination': True,
         'guest_format': 'swap',
         'disk_bus': 'scsi',
         'volume_size': 2,
         'boot_index': -1})

    swap_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2,
        'disk_bus': 'scsi'}

    swap_legacy_driver_bdm = {
        'device_name': '/dev/sdb1',
        'swap_size': 2}

    ephemeral_bdm = block_device.BlockDeviceDict(
        {'id': 2, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sdc1',
         'source_type': 'blank',
         'destination_type': 'local',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 4,
         'guest_format': 'ext4',
         'delete_on_termination': True,
         'boot_index': -1})

    ephemeral_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'device_type': 'disk',
        'guest_format': 'ext4',
        'disk_bus': 'scsi'}

    ephemeral_legacy_driver_bdm = {
        'device_name': '/dev/sdc1',
        'size': 4,
        'virtual_name': 'ephemeral0',
        'num': 0}

    volume_bdm = block_device.BlockDeviceDict(
        {'id': 3, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda1',
         'source_type': 'volume',
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'volume_size': 8,
         'destination_type': 'volume',
         'volume_id': 'fake-volume-id-1',
         'guest_format': 'ext4',
         'connection_info': '{"fake": "connection_info"}',
         'delete_on_termination': False,
         'boot_index': 0})

    volume_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': 'ext4',
        'boot_index': 0}

    volume_legacy_driver_bdm = {
        'mount_device': '/dev/sda1',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': False}

    snapshot_bdm = block_device.BlockDeviceDict(
        {'id': 4, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'snapshot',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    snapshot_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    snapshot_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}

    image_bdm = block_device.BlockDeviceDict(
        {'id': 5, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 1,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'image',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'image_id': 'fake-image-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    image_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    image_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}

    blank_bdm = block_device.BlockDeviceDict(
        {'id': 6, 'instance_uuid': 'fake-instance',
         'device_name': '/dev/sda2',
         'delete_on_termination': True,
         'volume_size': 3,
         'disk_bus': 'scsi',
         'device_type': 'disk',
         'source_type': 'blank',
         'destination_type': 'volume',
         'connection_info': '{"fake": "connection_info"}',
         'snapshot_id': 'fake-snapshot-id-1',
         'volume_id': 'fake-volume-id-2',
         'boot_index': -1})

    blank_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True,
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'guest_format': None,
        'boot_index': -1}

    blank_legacy_driver_bdm = {
        'mount_device': '/dev/sda2',
        'connection_info': {"fake": "connection_info"},
        'delete_on_termination': True}

    def setUp(self):
        super(TestDriverBlockDevice, self).setUp()
        self.volume_api = self.mox.CreateMock(cinder.API)
        self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
        self.context = context.RequestContext('fake_user',
                                              'fake_project')

    def test_no_device_raises(self):
        """A BDM flagged no_device must not be transformable."""
        for name, cls in self.driver_classes.items():
            self.assertRaises(driver_block_device._NotTransformable,
                              cls, {'no_device': True})

    def _test_driver_device(self, name):
        """Common transformation checks for the BDM flavour ``name``."""
        db_bdm = getattr(self, "%s_bdm" % name)
        test_bdm = self.driver_classes[name](db_bdm)
        self.assertThat(test_bdm, matchers.DictMatches(
            getattr(self, "%s_driver_bdm" % name)))

        for k, v in db_bdm.iteritems():
            field_val = getattr(test_bdm._bdm_obj, k)
            if isinstance(field_val, bool):
                v = bool(v)
            self.assertEqual(field_val, v)

        self.assertThat(test_bdm.legacy(),
                        matchers.DictMatches(
                            getattr(self, "%s_legacy_driver_bdm" % name)))

        # Test passthru attributes
        for passthru in test_bdm._proxy_as_attr:
            self.assertEqual(getattr(test_bdm, passthru),
                             getattr(test_bdm._bdm_obj, passthru))

        # Make sure that all others raise _invalidType
        for other_name, cls in self.driver_classes.iteritems():
            if other_name == name:
                continue
            self.assertRaises(driver_block_device._InvalidType,
                              cls,
                              getattr(self, '%s_bdm' % name))

        # Test the save method
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save(self.context)
            for fld, alias in test_bdm._update_on_save.iteritems():
                self.assertEqual(test_bdm[alias or fld],
                                 getattr(test_bdm._bdm_obj, fld))
            save_mock.assert_called_once_with(self.context)

        # Test the save method with no context passed
        with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
            test_bdm.save()
            save_mock.assert_called_once_with()

    def _test_driver_default_size(self, name):
        """A missing or None volume_size must default to 0."""
        size = 'swap_size' if name == 'swap' else 'size'
        no_size_bdm = getattr(self, "%s_bdm" % name).copy()
        no_size_bdm['volume_size'] = None

        driver_bdm = self.driver_classes[name](no_size_bdm)
        self.assertEqual(driver_bdm[size], 0)

        del no_size_bdm['volume_size']

        driver_bdm = self.driver_classes[name](no_size_bdm)
        self.assertEqual(driver_bdm[size], 0)

    def test_driver_swap_block_device(self):
        self._test_driver_device("swap")

    def test_driver_swap_default_size(self):
        self._test_driver_default_size('swap')

    def test_driver_ephemeral_block_device(self):
        self._test_driver_device("ephemeral")

    def test_driver_ephemeral_default_size(self):
        self._test_driver_default_size('ephemeral')

    def test_driver_volume_block_device(self):
        self._test_driver_device("volume")

        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        self.assertEqual(test_bdm['connection_info'],
                         jsonutils.loads(test_bdm._bdm_obj.connection_info))
        self.assertEqual(test_bdm._bdm_obj.id, 3)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
        self.assertEqual(test_bdm.volume_size, 8)

    def test_driver_snapshot_block_device(self):
        self._test_driver_device("snapshot")

        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 4)
        self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
        self.assertEqual(test_bdm.volume_size, 3)

    def test_driver_image_block_device(self):
        self._test_driver_device('image')

        test_bdm = self.driver_classes['image'](
            self.image_bdm)
        self.assertEqual(test_bdm._bdm_obj.id, 5)
        self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
        self.assertEqual(test_bdm.volume_size, 1)

    def test_driver_image_block_device_destination_local(self):
        """An image BDM with a local destination is not implemented."""
        self._test_driver_device('image')
        bdm = self.image_bdm.copy()
        bdm['destination_type'] = 'local'
        self.assertRaises(driver_block_device._InvalidType,
                          self.driver_classes['image'], bdm)

    def test_driver_blank_block_device(self):
        self._test_driver_device('blank')

        test_bdm = self.driver_classes['blank'](
            self.blank_bdm)
        self.assertEqual(6, test_bdm._bdm_obj.id)
        self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
        self.assertEqual(3, test_bdm.volume_size)

    def _test_volume_attach(self, driver_bdm, bdm_dict,
                            fake_volume, check_attach=True,
                            fail_check_attach=False, driver_attach=False,
                            fail_driver_attach=False, volume_attach=True,
                            access_mode='rw'):
        """Record the mox expectations for one attach() scenario.

        Returns the fake instance dict and the connection_info the
        caller should expect on the driver BDM afterwards.  The various
        fail_* flags cut the expectation recording short at the point
        the corresponding call would raise.
        """
        elevated_context = self.context.elevated()
        self.stubs.Set(self.context, 'elevated',
                       lambda: elevated_context)
        self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
        self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'access_mode': access_mode}}
        expected_conn_info = {'data': {'access_mode': access_mode},
                              'serial': fake_volume['id']}
        enc_data = {'fake': 'enc_data'}

        self.volume_api.get(self.context,
                            fake_volume['id']).AndReturn(fake_volume)
        if check_attach:
            if not fail_check_attach:
                self.volume_api.check_attach(self.context, fake_volume,
                                             instance=instance).AndReturn(None)
            else:
                self.volume_api.check_attach(self.context, fake_volume,
                                             instance=instance).AndRaise(
                                                 test.TestingException)
                return instance, expected_conn_info

        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            elevated_context, fake_volume['id'],
            connector).AndReturn(connection_info)
        if driver_attach:
            encryptors.get_encryption_metadata(
                elevated_context, self.volume_api, fake_volume['id'],
                connection_info).AndReturn(enc_data)
            if not fail_driver_attach:
                self.virt_driver.attach_volume(
                    elevated_context, expected_conn_info, instance,
                    bdm_dict['device_name'],
                    disk_bus=bdm_dict['disk_bus'],
                    device_type=bdm_dict['device_type'],
                    encryption=enc_data).AndReturn(None)
            else:
                self.virt_driver.attach_volume(
                    elevated_context, expected_conn_info, instance,
                    bdm_dict['device_name'],
                    disk_bus=bdm_dict['disk_bus'],
                    device_type=bdm_dict['device_type'],
                    encryption=enc_data).AndRaise(test.TestingException)
                self.volume_api.terminate_connection(
                    elevated_context, fake_volume['id'],
                    expected_conn_info).AndReturn(None)
                return instance, expected_conn_info

        if volume_attach:
            self.volume_api.attach(elevated_context, fake_volume['id'],
                                   'fake_uuid', bdm_dict['device_name'],
                                   mode=access_mode).AndReturn(None)
        driver_bdm._bdm_obj.save(self.context).AndReturn(None)
        return instance, expected_conn_info

    def test_volume_attach(self):
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume)

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def test_volume_attach_ro(self):
        test_bdm = self.driver_classes['volume'](self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, access_mode='ro')

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def check_volume_attach_check_attach_fails(self):
        """attach() should propagate a check_attach failure."""
        # NOTE(review): the name lacks the test_ prefix, so the test
        # runner never collects this method; renaming it would change
        # which tests execute, so it is left as-is and only the
        # assertRaises typo is fixed.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1'}

        instance, _ = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, fail_check_attach=True)
        self.mox.ReplayAll()

        self.assertRaises(test.TestingException, test_bdm.attach,
                          self.context, instance, self.volume_api,
                          self.virt_driver)

    def test_volume_no_volume_attach(self):
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=False)

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=False)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def test_volume_attach_no_check_driver_attach(self):
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1',
                  'attach_status': 'detached'}

        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, check_attach=False,
            driver_attach=True)

        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance,
                        self.volume_api, self.virt_driver,
                        do_check_attach=False, do_driver_attach=True)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def check_volume_attach_driver_attach_fails(self):
        """attach() should propagate a driver attach failure."""
        # NOTE(review): not collected (no test_ prefix), see above.
        # NOTE(review): this records fail_check_attach=True although the
        # name suggests a driver-attach failure scenario - confirm the
        # intended expectation before enabling.
        test_bdm = self.driver_classes['volume'](
            self.volume_bdm)
        volume = {'id': 'fake-volume-id-1'}

        instance, _ = self._test_volume_attach(
            test_bdm, self.volume_bdm, volume, fail_check_attach=True)
        self.mox.ReplayAll()

        self.assertRaises(test.TestingException, test_bdm.attach,
                          self.context, instance, self.volume_api,
                          self.virt_driver, do_driver_attach=True)

    def test_refresh_connection(self):
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)

        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}
        connector = {'ip': 'fake_ip', 'host': 'fake_host'}
        connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
        expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
                              'serial': 'fake-volume-id-2'}

        self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')

        self.virt_driver.get_volume_connector(instance).AndReturn(connector)
        self.volume_api.initialize_connection(
            self.context, test_bdm.volume_id,
            connector).AndReturn(connection_info)
        test_bdm._bdm_obj.save(self.context).AndReturn(None)

        self.mox.ReplayAll()

        test_bdm.refresh_connection_info(self.context, instance,
                                         self.volume_api, self.virt_driver)
        self.assertThat(test_bdm['connection_info'],
                        matchers.DictMatches(expected_conn_info))

    def test_snapshot_attach_no_volume(self):
        no_volume_snapshot = self.snapshot_bdm.copy()
        no_volume_snapshot['volume_id'] = None
        test_bdm = self.driver_classes['snapshot'](no_volume_snapshot)

        snapshot = {'id': 'fake-volume-id-1',
                    'attach_status': 'detached'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}

        wait_func = self.mox.CreateMockAnything()

        self.volume_api.get_snapshot(self.context,
                                     'fake-snapshot-id-1').AndReturn(snapshot)
        self.volume_api.create(self.context, 3,
                               '', '', snapshot).AndReturn(volume)
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_snapshot, volume)
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')

    def test_snapshot_attach_volume(self):
        test_bdm = self.driver_classes['snapshot'](
            self.snapshot_bdm)

        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}

        volume_class = self.driver_classes['volume']
        self.mox.StubOutWithMock(volume_class, 'attach')

        # Make sure these are not called
        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
        self.mox.StubOutWithMock(self.volume_api, 'create')

        volume_class.attach(self.context, instance, self.volume_api,
                            self.virt_driver, do_check_attach=True
                            ).AndReturn(None)
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')

    def test_image_attach_no_volume(self):
        no_volume_image = self.image_bdm.copy()
        no_volume_image['volume_id'] = None
        test_bdm = self.driver_classes['image'](no_volume_image)

        image = {'id': 'fake-image-id-1'}
        volume = {'id': 'fake-volume-id-2',
                  'attach_status': 'detached'}

        wait_func = self.mox.CreateMockAnything()

        self.volume_api.create(self.context, 1,
                               '', '', image_id=image['id']).AndReturn(volume)
        wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
        instance, expected_conn_info = self._test_volume_attach(
            test_bdm, no_volume_image, volume)
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver, wait_func)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')

    def test_image_attach_volume(self):
        test_bdm = self.driver_classes['image'](
            self.image_bdm)

        instance = {'id': 'fake_id', 'uuid': 'fake_uuid'}

        volume_class = self.driver_classes['volume']
        self.mox.StubOutWithMock(volume_class, 'attach')

        # Make sure these are not called
        self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
        self.mox.StubOutWithMock(self.volume_api, 'create')

        volume_class.attach(self.context, instance, self.volume_api,
                            self.virt_driver, do_check_attach=True
                            ).AndReturn(None)
        self.mox.ReplayAll()

        test_bdm.attach(self.context, instance, self.volume_api,
                        self.virt_driver)
        self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')

    def test_blank_attach_volume(self):
        no_blank_volume = self.blank_bdm.copy()
        no_blank_volume['volume_id'] = None
        test_bdm = self.driver_classes['blank'](no_blank_volume)
        instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
                                                   **{'uuid': 'fake-uuid'})
        volume_class = self.driver_classes['volume']
        volume = {'id': 'fake-volume-id-2',
                  'display_name': 'fake-uuid-blank-vol'}

        with contextlib.nested(
            mock.patch.object(self.volume_api, 'create', return_value=volume),
            mock.patch.object(volume_class, 'attach')
        ) as (vol_create, vol_attach):
            test_bdm.attach(self.context, instance, self.volume_api,
                            self.virt_driver)

            vol_create.assert_called_once_with(self.context,
                                               test_bdm.volume_size,
                                               'fake-uuid-blank-vol',
                                               '')
            vol_attach.assert_called_once_with(self.context, instance,
                                               self.volume_api,
                                               self.virt_driver,
                                               do_check_attach=True)
            self.assertEqual('fake-volume-id-2', test_bdm.volume_id)

    def test_convert_block_devices(self):
        converted = driver_block_device._convert_block_devices(
            self.driver_classes['volume'],
            [self.volume_bdm, self.ephemeral_bdm])
        self.assertEqual(converted, [self.volume_driver_bdm])

    def test_legacy_block_devices(self):
        test_snapshot = self.driver_classes['snapshot'](
            self.snapshot_bdm)

        block_device_mapping = [test_snapshot, test_snapshot]
        legacy_bdm = driver_block_device.legacy_block_devices(
            block_device_mapping)
        self.assertEqual(legacy_bdm, [self.snapshot_legacy_driver_bdm,
                                      self.snapshot_legacy_driver_bdm])

        # Test that the ephemerals work as expected
        test_ephemerals = [self.driver_classes['ephemeral'](
            self.ephemeral_bdm) for _ in xrange(2)]
        expected = [self.ephemeral_legacy_driver_bdm.copy()
                    for _ in xrange(2)]
        expected[0]['virtual_name'] = 'ephemeral0'
        expected[0]['num'] = 0
        expected[1]['virtual_name'] = 'ephemeral1'
        expected[1]['num'] = 1
        legacy_ephemerals = driver_block_device.legacy_block_devices(
            test_ephemerals)
        self.assertEqual(expected, legacy_ephemerals)

    def test_get_swap(self):
        swap = [self.swap_driver_bdm]
        legacy_swap = [self.swap_legacy_driver_bdm]
        no_swap = [self.volume_driver_bdm]

        self.assertEqual(swap[0], driver_block_device.get_swap(swap))
        self.assertEqual(legacy_swap[0],
                         driver_block_device.get_swap(legacy_swap))
        self.assertIsNone(driver_block_device.get_swap(no_swap))
        self.assertIsNone(driver_block_device.get_swap([]))

    def test_is_implemented(self):
        for bdm in (self.image_bdm, self.volume_bdm, self.swap_bdm,
                    self.ephemeral_bdm, self.snapshot_bdm):
            self.assertTrue(driver_block_device.is_implemented(bdm))
        local_image = self.image_bdm.copy()
        local_image['destination_type'] = 'local'
        self.assertFalse(driver_block_device.is_implemented(local_image))

    def test_is_block_device_mapping(self):
        test_swap = self.driver_classes['swap'](self.swap_bdm)
        test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
        test_image = self.driver_classes['image'](self.image_bdm)
        test_snapshot = self.driver_classes['snapshot'](self.snapshot_bdm)
        test_volume = self.driver_classes['volume'](self.volume_bdm)
        test_blank = self.driver_classes['blank'](self.blank_bdm)

        # Volume-backed flavours count as block device mappings...
        for bdm in (test_image, test_snapshot, test_volume, test_blank):
            self.assertTrue(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))

        # ...local flavours do not.
        for bdm in (test_swap, test_ephemeral):
            self.assertFalse(driver_block_device.is_block_device_mapping(
                bdm._bdm_obj))
| |
"""Support for displaying collected data over SNMP."""
from __future__ import annotations
from datetime import timedelta
import logging
import pysnmp.hlapi.asyncio as hlapi
from pysnmp.hlapi.asyncio import (
CommunityData,
ContextData,
ObjectIdentity,
ObjectType,
SnmpEngine,
UdpTransportTarget,
UsmUserData,
getCmd,
)
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from .const import (
CONF_ACCEPT_ERRORS,
CONF_AUTH_KEY,
CONF_AUTH_PROTOCOL,
CONF_BASEOID,
CONF_COMMUNITY,
CONF_DEFAULT_VALUE,
CONF_PRIV_KEY,
CONF_PRIV_PROTOCOL,
CONF_VERSION,
DEFAULT_AUTH_PROTOCOL,
DEFAULT_COMMUNITY,
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_PRIV_PROTOCOL,
DEFAULT_VERSION,
MAP_AUTH_PROTOCOLS,
MAP_PRIV_PROTOCOLS,
SNMP_VERSIONS,
)
_LOGGER = logging.getLogger(__name__)

# Poll the SNMP host every 10 seconds.
SCAN_INTERVAL = timedelta(seconds=10)

# Extend the base sensor platform schema with SNMP-specific options.
# SNMP v1/v2c authenticate with CONF_COMMUNITY; v3 uses CONF_USERNAME
# plus the auth/priv key and protocol options below.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_BASEOID): cv.string,
        vol.Optional(CONF_ACCEPT_ERRORS, default=False): cv.boolean,
        vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
        vol.Optional(CONF_DEFAULT_VALUE): cv.string,
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In(SNMP_VERSIONS),
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_AUTH_KEY): cv.string,
        vol.Optional(CONF_AUTH_PROTOCOL, default=DEFAULT_AUTH_PROTOCOL): vol.In(
            MAP_AUTH_PROTOCOLS
        ),
        vol.Optional(CONF_PRIV_KEY): cv.string,
        vol.Optional(CONF_PRIV_PROTOCOL, default=DEFAULT_PRIV_PROTOCOL): vol.In(
            MAP_PRIV_PROTOCOLS
        ),
    }
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the SNMP sensor."""
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    port = config.get(CONF_PORT)
    community = config.get(CONF_COMMUNITY)
    baseoid = config.get(CONF_BASEOID)
    unit = config.get(CONF_UNIT_OF_MEASUREMENT)
    version = config[CONF_VERSION]
    username = config.get(CONF_USERNAME)
    authkey = config.get(CONF_AUTH_KEY)
    authproto = config[CONF_AUTH_PROTOCOL]
    privkey = config.get(CONF_PRIV_KEY)
    privproto = config[CONF_PRIV_PROTOCOL]
    accept_errors = config.get(CONF_ACCEPT_ERRORS)
    default_value = config.get(CONF_DEFAULT_VALUE)
    value_template = config.get(CONF_VALUE_TEMPLATE)

    if value_template is not None:
        value_template.hass = hass

    # Build the pysnmp authentication object: UsmUserData for SNMPv3,
    # CommunityData for v1/v2c.
    if version == "3":
        # Without a key the corresponding protocol must be disabled.
        if not authkey:
            authproto = "none"
        if not privkey:
            privproto = "none"
        auth_data = UsmUserData(
            username,
            authKey=authkey or None,
            privKey=privkey or None,
            authProtocol=getattr(hlapi, MAP_AUTH_PROTOCOLS[authproto]),
            privProtocol=getattr(hlapi, MAP_PRIV_PROTOCOLS[privproto]),
        )
    else:
        auth_data = CommunityData(community, mpModel=SNMP_VERSIONS[version])

    request_args = [
        SnmpEngine(),
        auth_data,
        UdpTransportTarget((host, port)),
        ContextData(),
    ]

    # Probe the configured OID once so misconfiguration fails fast.
    errindication, _, _, _ = await getCmd(
        *request_args, ObjectType(ObjectIdentity(baseoid))
    )

    if errindication and not accept_errors:
        _LOGGER.error("Please check the details in the configuration file")
        return

    data = SnmpData(request_args, baseoid, accept_errors, default_value)
    async_add_entities([SnmpSensor(data, name, unit, value_template)], True)
class SnmpSensor(SensorEntity):
    """Representation of a SNMP sensor."""

    def __init__(self, data, name, unit_of_measurement, value_template):
        """Initialize the sensor with its data source and render options."""
        self.data = data
        self._name = name
        self._state = None
        self._unit_of_measurement = unit_of_measurement
        self._value_template = value_template

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def native_value(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    async def async_update(self):
        """Get the latest data and updates the states."""
        await self.data.async_update()
        value = self.data.value
        if value is None:
            # No reading available at all.
            self._state = STATE_UNKNOWN
            return
        if self._value_template is not None:
            value = self._value_template.async_render_with_possible_json_value(
                value, STATE_UNKNOWN
            )
        self._state = value
class SnmpData:
    """Get the latest data and update the states."""

    def __init__(self, request_args, baseoid, accept_errors, default_value):
        """Initialize the data object."""
        self._request_args = request_args
        self._baseoid = baseoid
        self._accept_errors = accept_errors
        self._default_value = default_value
        self.value = None

    async def async_update(self):
        """Get the latest data from the remote SNMP capable host."""
        errindication, errstatus, errindex, restable = await getCmd(
            *self._request_args, ObjectType(ObjectIdentity(self._baseoid))
        )

        if (errindication or errstatus) and self._accept_errors:
            # Tolerated failure: fall back to the configured default.
            self.value = self._default_value
        elif errindication:
            _LOGGER.error("SNMP error: %s", errindication)
        elif errstatus:
            _LOGGER.error(
                "SNMP error: %s at %s",
                errstatus.prettyPrint(),
                errindex and restable[-1][int(errindex) - 1] or "?",
            )
        else:
            # Successful poll: keep the last row's rendered value.
            for resrow in restable:
                self.value = resrow[-1].prettyPrint()
| |
from django.db import OperationalError
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render, Http404
import datetime
from django.core.urlresolvers import reverse
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from PiControl.models import Pin, Schedule
from PiControl.models import Git
from PiControl.pin_controller import PinController
from .forms import PinForm, ScheduleForm
import rollbar
# Create the pin controller instance
# Module-level singleton: one PinController is shared by every view below.
pin_controller = PinController()
def dashboard(request):
    """Render the dashboard page with the current pin data."""
    context = pin_controller.get_dashboard_data()
    return render(request, "dashboard.html", context)
def pins(request):
    """Render the pin list page."""
    context = pin_controller.get_all_pins()
    return render(request, "pins/pins.html", context)
def pin_create(request):
    """Show an empty create/edit form for a new pin."""
    form = PinForm()
    return render(request, "pins/pin-create-edit.html", {'form': form})
def pin_delete(request, id):
    """Delete the pin with the given id and report the result as JSON."""
    try:
        pin = pin_controller.my_pins.get(id=id)
    except Pin.DoesNotExist:
        raise Http404("Could not find that pin!")
    # delete() returns (total, {model_label: count}); report the count.
    deleted_count = list(pin.delete()[1].values())[0]
    pin_controller.set_all_pins()
    return JsonResponse({'success': deleted_count})
def pin_post(request):
    """Create or update a pin from POSTed form data.

    POST field 'id': when present and numeric, the existing pin is
    updated; otherwise a new Pin is created.  Redirects to /pins on
    success, re-renders the form with errors otherwise.
    """
    if request.method != 'POST':
        return HttpResponseRedirect("/pins")
    pin_id = request.POST.get('id')
    # BUG FIX: 'id' may be absent (None) for a brand-new pin, and
    # None.isdigit() raises AttributeError - guard before calling it.
    # (Also renamed away from the builtin 'id'.)
    if pin_id is not None and pin_id.isdigit():
        pin = pin_controller.my_pins.get(id=pin_id)
    else:
        pin = Pin()
    form = PinForm(request.POST, instance=pin)
    if form.is_valid():
        form.save()
        # Refresh the controller's cached pin list after any change.
        pin_controller.set_all_pins()
        return HttpResponseRedirect("/pins")
    return render(request, "pins/pin-create-edit.html", {'form': form})
def pin_edit(request, id):
    """Show the create/edit form pre-populated with an existing pin."""
    try:
        existing_pin = pin_controller.my_pins.get(id=id)
    except Pin.DoesNotExist:
        raise Http404("Could not find that pin!")
    return render(request, "pins/pin-create-edit.html",
                  {"form": PinForm(instance=existing_pin)})
def pin_set(request):
    """Set a pin's output state from POST data and return the result as JSON.

    POST fields: 'pin' (pin id) and 'state' ('1'/'true' for on).
    """
    if request.method != 'POST':
        return HttpResponseRedirect("/")
    pin_id = request.POST['pin']
    raw_state = request.POST['state']
    state = raw_state == '1' or raw_state.lower() == 'true'
    try:
        pin = pin_controller.my_pins.get(id=pin_id)
    except Pin.DoesNotExist:
        return JsonResponse({'success': False, 'state': state, 'message': 'Can not find pin'})
    pin.set_state(state)
    new_state = pin.get_state()
    # Success means the hardware reports the state we asked for.
    return JsonResponse({'success': new_state == state, 'state': new_state})
def git_update(request):
    """POST: pull the latest code via Git; GET: show the current repo status."""
    git = Git()
    if request.method == 'POST':
        return JsonResponse({'success': git.update()})
    return render(request, "git/update.html", {"status": git.check()})
def get_temp(request):
    """Return the temperature reading for a pin as JSON.

    Accepts the pin id via POST or GET under the key 'id'.
    """
    params = request.POST if request.method == 'POST' else request.GET
    pin_id = params['id']
    try:
        pin = pin_controller.my_pins.get(id=pin_id)
    except Pin.DoesNotExist:
        return JsonResponse({'success': False, 'message': 'Pin not found'})
    return JsonResponse({'success': True, 'temp': pin.get_temp()})
def schedule(request):
    """Render the schedule overview page with every schedule entry."""
    entries = Schedule.objects.all()
    return render(request, "schedule/index.html", {"days": entries})
def schedule_edit(request, id):
    """Show the edit form for an existing schedule entry."""
    try:
        entry = Schedule.objects.get(id=id)
    except Schedule.DoesNotExist:
        # BUG FIX: the 404 message was copy-pasted from the pin views and
        # wrongly referred to a pin.
        raise Http404("Could not find that schedule!")
    form = ScheduleForm(instance=entry)
    return render(request, "schedule/create-edit.html", {"form": form})
def schedule_create(request):
    """Show an empty create/edit form for a new schedule entry."""
    form = ScheduleForm()
    return render(request, "schedule/create-edit.html", {"form": form})
def schedule_post(request):
    """Create or update a schedule entry from POSTed form data.

    POST field 'id': when present and numeric, the existing entry is
    updated; otherwise a new Schedule is created.
    """
    if request.method != 'POST':
        return HttpResponseRedirect(reverse('schedule'))
    schedule_id = request.POST.get('id')
    # BUG FIX: 'id' may be absent (None) for a new entry, and
    # None.isdigit() raises AttributeError - guard before calling it.
    # (Also renamed away from the builtin 'id'.)
    if schedule_id is not None and schedule_id.isdigit():
        s = Schedule.objects.get(id=schedule_id)
    else:
        s = Schedule()
    form = ScheduleForm(request.POST, instance=s)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('schedule'))
    return render(request, "schedule/create-edit.html", {'form': form})
def schedule_delete(request, id):
    """Delete the schedule entry with the given id; report success as JSON."""
    try:
        s = Schedule.objects.get(id=id)
    except Schedule.DoesNotExist:
        # BUG FIX: the 404 message was copy-pasted from the pin views and
        # wrongly referred to a pin.
        raise Http404("Could not find that schedule!")
    # delete() returns (total, {model_label: count}); report the count.
    result = list(s.delete()[1].values())[0]
    return JsonResponse({'success': result})
@csrf_exempt
def google_set_ac(request):
    """Webhook endpoint: activate the first schedule entry for N minutes.

    Expects POST fields 'token' (must match settings.API_TOKEN) and an
    optional 'minutes' (default 30).  Returns {'success': bool}.
    CSRF is exempted because the caller is an external webhook.
    """
    token = settings.API_TOKEN
    date_now = datetime.datetime.now()
    if request.method != 'POST':
        return JsonResponse({'success': False})
    try:
        if token != request.POST.get('token'):
            return JsonResponse({'success': False})
        minutes = request.POST.get('minutes', 30)
        current = Schedule.objects.first()
        # BUG FIX: first() returns None on an empty table, which used to
        # surface as an AttributeError swallowed by the except clause.
        if current is None:
            return JsonResponse({'success': False})
        current.start_at = date_now.time()
        current.end_at = (date_now + datetime.timedelta(minutes=int(minutes))).time()
        current.day_of_week = date_now.weekday()
        current.active = True
        current.save()
        current.activate()
        return JsonResponse({'success': True})
    except Exception:
        # BUG FIX: was a bare 'except:', which also traps SystemExit and
        # KeyboardInterrupt; still best-effort - report and return False.
        rollbar.report_exc_info()
        return JsonResponse({'success': False})
@csrf_exempt
def bowling_results(request):
    """Return the scraped bowling results file verbatim as JSON."""
    # BUG FIX: the file handle was never closed; use a context manager so
    # it is released deterministically instead of at garbage collection.
    with open('..//ZoneBowlingStats//Scraper//results.json', 'r') as results_file:
        results = results_file.read()
    return JsonResponse({'results': results})
| |
# Copyright (c) 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
from oslo_log import log as logging
from oslo_serialization import jsonutils
import requests
import six
from nova.tests.unit.image import fake
LOG = logging.getLogger(__name__)
class APIResponse(object):
    """Wrap a ``requests`` response with a JSON-decoded body.

    Nova API responses are JSON dictionaries (or empty), so tests can
    inspect ``body`` directly instead of decoding ``content`` each time.
    The raw status, content and headers remain available as attributes.
    """

    status = 200    # HTTP status code as an int
    content = ""    # raw HTTP response body as a string
    body = {}       # json-decoded body as a dictionary
    headers = {}    # response headers as a dictionary

    def __init__(self, response):
        """Build from a ``requests`` library response object.

        :param response: a ``requests`` library response
        """
        super(APIResponse, self).__init__()
        self.status = response.status_code
        self.content = response.content
        self.headers = response.headers
        if self.content:
            self.body = jsonutils.loads(self.content)

    def __str__(self):
        # repr() of self would recurse through __str__, so format the
        # interesting attributes explicitly.
        template = "<Response body:%r, status_code:%s>"
        return template % (self.body, self.status)
class OpenStackApiException(Exception):
    """Base error for unexpected API behaviour in tests.

    When a response object is supplied its status code and body are
    appended to the message to ease debugging of failed requests.
    """

    def __init__(self, message=None, response=None):
        self.response = response
        text = message or 'Unspecified error'
        if response:
            detail = {'message': text,
                      '_status': response.status_code,
                      '_body': response.content}
            text = ('%(message)s\nStatus Code: %(_status)s\n'
                    'Body: %(_body)s' % detail)
        super(OpenStackApiException, self).__init__(text)
class OpenStackApiAuthenticationException(OpenStackApiException):
    """Raised when the API rejects the supplied credentials (HTTP 401)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiAuthenticationException, self).__init__(
            message or "Authentication error", response)
class OpenStackApiAuthorizationException(OpenStackApiException):
    """Raised when an authenticated request is not permitted."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiAuthorizationException, self).__init__(
            message or "Authorization error", response)
class OpenStackApiNotFoundException(OpenStackApiException):
    """Raised when the requested resource does not exist (HTTP 404)."""

    def __init__(self, response=None, message=None):
        super(OpenStackApiNotFoundException, self).__init__(
            message or "Item not found", response)
class TestOpenStackClient(object):
    """Simple OpenStack API Client.

    This is a really basic OpenStack API client that is under our control,
    so we can make changes / insert hooks for testing.
    """

    def __init__(self, auth_user, auth_key, auth_uri):
        super(TestOpenStackClient, self).__init__()
        self.auth_result = None
        self.auth_user = auth_user
        self.auth_key = auth_key
        self.auth_uri = auth_uri
        # default project_id
        self.project_id = 'openstack'

    def request(self, url, method='GET', body=None, headers=None):
        """Issue a raw HTTP request, defaulting the content type to JSON.

        Caller-supplied headers override the default.
        """
        _headers = {'Content-Type': 'application/json'}
        _headers.update(headers or {})
        response = requests.request(method, url, data=body, headers=_headers)
        return response

    def _authenticate(self):
        """Authenticate once and cache the auth response headers.

        The cached headers carry the management URL and token used by
        :meth:`api_request`.  Raises
        OpenStackApiAuthenticationException on a 401.
        """
        if self.auth_result:
            return self.auth_result
        auth_uri = self.auth_uri
        headers = {'X-Auth-User': self.auth_user,
                   'X-Auth-Key': self.auth_key,
                   'X-Auth-Project-Id': self.project_id}
        response = self.request(auth_uri,
                                headers=headers)
        http_status = response.status_code
        LOG.debug("%(auth_uri)s => code %(http_status)s",
                  {'auth_uri': auth_uri, 'http_status': http_status})
        if http_status == 401:
            raise OpenStackApiAuthenticationException(response=response)
        self.auth_result = response.headers
        return self.auth_result

    def api_request(self, relative_uri, check_response_status=None,
                    strip_version=False, **kwargs):
        """Make an authenticated request relative to the management URL.

        :param check_response_status: optional list of acceptable HTTP
            status codes; any other status raises the matching exception
            (404 -> NotFound, 401 -> Authorization, else generic).
        :param strip_version: drop the version/tenant suffix from the
            management URL before appending ``relative_uri``.
        """
        auth_result = self._authenticate()
        # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
        base_uri = auth_result['x-server-management-url']
        if strip_version:
            # NOTE(vish): cut out version number and tenant_id
            base_uri = '/'.join(base_uri.split('/', 3)[:-1])
        full_uri = '%s/%s' % (base_uri, relative_uri)
        headers = kwargs.setdefault('headers', {})
        headers['X-Auth-Token'] = auth_result['x-auth-token']
        response = self.request(full_uri, **kwargs)
        http_status = response.status_code
        LOG.debug("%(relative_uri)s => code %(http_status)s",
                  {'relative_uri': relative_uri, 'http_status': http_status})
        if check_response_status:
            if http_status not in check_response_status:
                if http_status == 404:
                    raise OpenStackApiNotFoundException(response=response)
                elif http_status == 401:
                    raise OpenStackApiAuthorizationException(response=response)
                else:
                    raise OpenStackApiException(
                        message="Unexpected status code",
                        response=response)
        return response

    def _decode_json(self, response):
        """Wrap a raw response in a decoded APIResponse.

        Bug fix: the previous implementation called
        ``APIResponse(status=response.status_code)``, which is not a
        valid constructor signature and raised TypeError on every call.
        ``APIResponse`` already decodes the body when content is present.
        """
        return APIResponse(response)

    def api_get(self, relative_uri, **kwargs):
        """GET, expecting a 200."""
        kwargs.setdefault('check_response_status', [200])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    def api_post(self, relative_uri, body, **kwargs):
        """POST a JSON-encoded body, expecting 200/202."""
        kwargs['method'] = 'POST'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)
        kwargs.setdefault('check_response_status', [200, 202])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    def api_put(self, relative_uri, body, **kwargs):
        """PUT a JSON-encoded body, expecting 200/202/204."""
        kwargs['method'] = 'PUT'
        if body:
            headers = kwargs.setdefault('headers', {})
            headers['Content-Type'] = 'application/json'
            kwargs['body'] = jsonutils.dumps(body)
        kwargs.setdefault('check_response_status', [200, 202, 204])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    def api_delete(self, relative_uri, **kwargs):
        """DELETE, expecting 200/202/204."""
        kwargs['method'] = 'DELETE'
        kwargs.setdefault('check_response_status', [200, 202, 204])
        return APIResponse(self.api_request(relative_uri, **kwargs))

    #####################################
    #
    # Convenience methods
    #
    # The following are a set of convenience methods to get well known
    # resources, they can be helpful in setting up resources in
    # tests. All of these convenience methods throw exceptions if they
    # get a non 20x status code, so will appropriately abort tests if
    # they fail.
    #
    # They all return the most relevant part of their response body as
    # decoded data structure.
    #
    #####################################

    def get_server(self, server_id):
        return self.api_get('/servers/%s' % server_id).body['server']

    def get_servers(self, detail=True, search_opts=None):
        rel_url = '/servers/detail' if detail else '/servers'
        if search_opts is not None:
            qparams = {}
            for opt, val in six.iteritems(search_opts):
                qparams[opt] = val
            if qparams:
                # Bug fix: ``urllib.urlencode`` is Python-2-only (it moved
                # to ``urllib.parse`` in Python 3); six.moves resolves the
                # correct location on either version.
                from six.moves.urllib.parse import urlencode
                query_string = "?%s" % urlencode(qparams)
                rel_url += query_string
        return self.api_get(rel_url).body['servers']

    def post_server(self, server):
        response = self.api_post('/servers', server).body
        # Multi-create returns a reservation id instead of a server dict.
        if 'reservation_id' in response:
            return response
        else:
            return response['server']

    def put_server(self, server_id, server):
        return self.api_put('/servers/%s' % server_id, server).body

    def post_server_action(self, server_id, data):
        return self.api_post('/servers/%s/action' % server_id, data).body

    def delete_server(self, server_id):
        return self.api_delete('/servers/%s' % server_id)

    def get_image(self, image_id):
        return self.api_get('/images/%s' % image_id).body['image']

    def get_images(self, detail=True):
        rel_url = '/images/detail' if detail else '/images'
        return self.api_get(rel_url).body['images']

    def post_image(self, image):
        return self.api_post('/images', image).body['image']

    def delete_image(self, image_id):
        return self.api_delete('/images/%s' % image_id)

    def get_flavor(self, flavor_id):
        return self.api_get('/flavors/%s' % flavor_id).body['flavor']

    def get_flavors(self, detail=True):
        rel_url = '/flavors/detail' if detail else '/flavors'
        return self.api_get(rel_url).body['flavors']

    def post_flavor(self, flavor):
        return self.api_post('/flavors', flavor).body['flavor']

    def delete_flavor(self, flavor_id):
        return self.api_delete('/flavors/%s' % flavor_id)

    def post_extra_spec(self, flavor_id, spec):
        return self.api_post('/flavors/%s/os-extra_specs' %
                             flavor_id, spec)

    def get_volume(self, volume_id):
        return self.api_get('/volumes/%s' % volume_id).body['volume']

    def get_volumes(self, detail=True):
        rel_url = '/volumes/detail' if detail else '/volumes'
        return self.api_get(rel_url).body['volumes']

    def post_volume(self, volume):
        return self.api_post('/volumes', volume).body['volume']

    def delete_volume(self, volume_id):
        return self.api_delete('/volumes/%s' % volume_id)

    def get_server_volume(self, server_id, attachment_id):
        return self.api_get('/servers/%s/os-volume_attachments/%s' %
                            (server_id, attachment_id)
                            ).body['volumeAttachment']

    def get_server_volumes(self, server_id):
        return self.api_get('/servers/%s/os-volume_attachments' %
                            (server_id)).body['volumeAttachments']

    def post_server_volume(self, server_id, volume_attachment):
        return self.api_post('/servers/%s/os-volume_attachments' %
                             (server_id), volume_attachment
                             ).body['volumeAttachment']

    def delete_server_volume(self, server_id, attachment_id):
        return self.api_delete('/servers/%s/os-volume_attachments/%s' %
                               (server_id, attachment_id))

    def post_server_metadata(self, server_id, metadata):
        post_body = {'metadata': {}}
        post_body['metadata'].update(metadata)
        return self.api_post('/servers/%s/metadata' % server_id,
                             post_body).body['metadata']
class TestOpenStackClientV3(TestOpenStackClient):
    """Simple OpenStack v3 API Client.

    This is a really basic OpenStack API client that is under our control,
    so we can make changes / insert hooks for testing.

    The V3 API exposes no image endpoints, so instead of querying the API
    these image helpers talk to the unit tests' fake image service
    directly.
    """

    def get_image(self, image_id):
        service = fake._fakeImageService
        return service.show(None, image_id)

    def get_images(self, detail=True):
        service = fake._fakeImageService
        return service.detail(None)

    def post_image(self, image):
        # Image creation has no v3 equivalent and no fake-service hook.
        raise NotImplementedError

    def delete_image(self, image_id):
        service = fake._fakeImageService
        return service.delete(None, image_id)
class TestOpenStackClientV3Mixin(object):
    """Mixin that supplies a v3 test client bound to ``self.auth_url``."""
    def _get_test_client(self):
        # Credentials are arbitrary: the fake auth backend used by the
        # functional tests accepts anything.
        return TestOpenStackClientV3('fake', 'fake', self.auth_url)
| |
import asyncio
import logging
from functools import partial
from unittest import mock
import pytest
from raven_aiohttp import QueuedAioHttpTransport
from tests.utils import Logger
pytestmark = pytest.mark.asyncio
@asyncio.coroutine
def test_basic(fake_server, raven_client, wait):
    """A captured exception is delivered to the server exactly once."""
    sentry = yield from fake_server()
    client, transport = raven_client(sentry, QueuedAioHttpTransport)
    try:
        1 / 0
    except ZeroDivisionError:
        client.captureException()
    yield from wait(transport)
    assert sentry.hits[200] == 1
@asyncio.coroutine
def test_no_keepalive(fake_server, raven_client, wait):
    """With keepalive disabled the per-send client session is closed.

    First verifies that a keepalive=False transport creates no persistent
    ``_client_session``, then forces the fixture-created transport into
    non-keepalive mode while patching the session factory so the send
    reuses a session we can observe being closed afterwards.
    """
    transport = QueuedAioHttpTransport(keepalive=False)
    assert not hasattr(transport, '_client_session')
    yield from transport.close()
    server = yield from fake_server()
    client, transport = raven_client(server, QueuedAioHttpTransport)
    # The fixture builds a keepalive transport; flip the flag after the
    # fact so the send path treats the session as disposable.
    transport._keepalive = False
    session = transport._client_session
    def _client_session_factory():
        return session
    with mock.patch(
        'raven_aiohttp.QueuedAioHttpTransport._client_session_factory',
        side_effect=_client_session_factory,
    ):
        try:
            1 / 0
        except ZeroDivisionError:
            client.captureException()
        yield from wait(transport)
    # Non-keepalive mode must close the session it used for the send.
    assert session.closed
    assert server.hits[200] == 1
@asyncio.coroutine
def test_close_timeout(fake_server, raven_client):
    """close(timeout=0) abandons queued events instead of delivering them."""
    sentry = yield from fake_server()
    # Make the server extremely slow so the event is still in flight
    # when close() is called.
    sentry.slop_factor = 100
    client, transport = raven_client(sentry, QueuedAioHttpTransport)
    try:
        1 / 0
    except ZeroDivisionError:
        client.captureException()
    yield from transport.close(timeout=0)
    assert sentry.hits[200] == 0
@asyncio.coroutine
def test_rate_limit(fake_server, raven_client, wait):
    """A 429 reply surfaces as a RateLimited API error in the sentry log."""
    sentry = yield from fake_server()
    sentry.side_effect['status'] = 429
    with Logger('sentry.errors', level=logging.ERROR) as log:
        client, transport = raven_client(sentry, QueuedAioHttpTransport)
        try:
            1 / 0
        except ZeroDivisionError:
            client.captureException()
        yield from wait(transport)
        assert sentry.hits[429] == 1
        expected = 'Sentry responded with an API error: RateLimited(None)'
        assert log.msgs[0] == expected
@asyncio.coroutine
def test_rate_limit_retry_after(fake_server, raven_client, wait):
    """A 429 with a Retry-After header still logs a RateLimited error."""
    sentry = yield from fake_server()
    sentry.side_effect['status'] = 429
    sentry.side_effect['headers'] = {'Retry-After': '1'}
    with Logger('sentry.errors', level=logging.ERROR) as log:
        client, transport = raven_client(sentry, QueuedAioHttpTransport)
        try:
            1 / 0
        except ZeroDivisionError:
            client.captureException()
        yield from wait(transport)
        assert sentry.hits[429] == 1
        # NOTE(review): the expectation is RateLimited(None) even though
        # Retry-After is set — confirm against the raven transport's
        # header parsing if this ever starts failing.
        expected = 'Sentry responded with an API error: RateLimited(None)'
        assert log.msgs[0] == expected
@asyncio.coroutine
def test_status_500(fake_server, raven_client, wait):
    """A 500 reply surfaces as a generic APIError in the sentry log."""
    sentry = yield from fake_server()
    sentry.side_effect['status'] = 500
    with Logger('sentry.errors', level=logging.ERROR) as log:
        client, transport = raven_client(sentry, QueuedAioHttpTransport)
        try:
            1 / 0
        except ZeroDivisionError:
            client.captureException()
        yield from wait(transport)
        assert sentry.hits[500] == 1
        expected = 'Sentry responded with an API error: APIError(None)'
        assert log.msgs[0] == expected
@asyncio.coroutine
def test_cancelled_error(event_loop, fake_server, raven_client, wait):
    """A CancelledError raised inside a send kills the worker tasks.

    The HTTP post is patched to raise CancelledError, so nothing reaches
    the server and the transport's worker tasks finish by re-raising the
    cancellation when gathered.
    """
    server = yield from fake_server()
    with mock.patch(
        'aiohttp.ClientSession.post',
        side_effect=asyncio.CancelledError,
    ):
        client, transport = raven_client(server, QueuedAioHttpTransport)
        try:
            1 / 0
        except ZeroDivisionError:
            client.captureException()
        yield from wait(transport)
        assert server.hits[200] == 0
    # NOTE(review): the explicit loop= argument to asyncio.gather was
    # deprecated in Python 3.8 and removed in 3.10 — fine for the
    # Python versions this suite targets, revisit on upgrade.
    with pytest.raises(asyncio.CancelledError):
        yield from asyncio.gather(*transport._workers, loop=event_loop)
@asyncio.coroutine
def test_async_send_when_closed(fake_server, raven_client):
    """Events captured after close() has begun are rejected and logged."""
    sentry = yield from fake_server()
    with Logger('sentry.errors', level=logging.ERROR) as log:
        client, transport = raven_client(sentry, QueuedAioHttpTransport)
        closing = transport.close()
        try:
            1 / 0
        except ZeroDivisionError:
            client.captureException()
        assert sentry.hits[200] == 0
        assert log.msgs[0].startswith(
            'Sentry responded with an error: QueuedAioHttpTransport is closed')
        yield from closing
@asyncio.coroutine
def test_async_send_queue_full(fake_server, raven_client, wait):
    """With qsize=1, the overflowing second event is dropped and logged."""
    sentry = yield from fake_server()
    with Logger('sentry.errors', level=logging.ERROR) as log:
        factory = partial(QueuedAioHttpTransport, qsize=1)
        client, transport = raven_client(sentry, factory)
        for _ in range(2):
            try:
                1 / 0
            except ZeroDivisionError:
                client.captureException()
        yield from wait(transport)
        # Only the first event fits the queue; the second is discarded.
        assert sentry.hits[200] == 1
        expected = ('Sentry responded with an error: '
                    'QueuedAioHttpTransport internal queue is full')
        assert log.msgs[0].startswith(expected)
@asyncio.coroutine
def test_async_send_queue_full_close(fake_server, raven_client):
    """Closing a transport with a still-queued event logs a drop error."""
    sentry = yield from fake_server()
    with Logger('sentry.errors', level=logging.ERROR) as log:
        factory = partial(QueuedAioHttpTransport, qsize=1)
        client, transport = raven_client(sentry, factory)
        try:
            1 / 0
        except ZeroDivisionError:
            client.captureException()
        yield from transport.close()
        assert sentry.hits[200] == 0
        expected = ('Sentry responded with an error: '
                    'QueuedAioHttpTransport internal queue was full')
        assert log.msgs[0].startswith(expected)
| |
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory iSCSI Driver for OpenStack Cinder
Provides iSCSI specific LUN services for V6000 series flash arrays.
This driver requires VMOS v6.3.0.4 or newer software on the array.
You will need to install the Violin Memory REST client library:
sudo pip install vmemclient
Set the following in the cinder.conf file to enable the VMEM V6000
ISCSI Driver along with the required flags:
volume_driver=cinder.volume.drivers.violin.v6000_iscsi.V6000ISCSIDriver
NOTE: this driver file requires the use of synchronization points for
certain types of backend operations, and as a result may not work
properly in an active-active HA configuration. See OpenStack Cinder
driver documentation for more information.
"""
import random
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
from cinder import context
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume.drivers.violin import v6000_common
LOG = logging.getLogger(__name__)
class V6000ISCSIDriver(driver.ISCSIDriver):
    """Executes commands relating to iSCSI-based Violin Memory Arrays.

    All backend operations go through the shared V6000Common helper
    (``self.common``), which holds the XG connections to the array
    (``vip``) and the two gateway nodes (``mga``/``mgb``).

    Version history:
        1.0 - Initial driver
        1.0.1 - Fixes polling for export completion
    """
    VERSION = '1.0.1'
    # Single HA target group shared by every export made by this driver.
    TARGET_GROUP_NAME = 'openstack'

    def __init__(self, *args, **kwargs):
        super(V6000ISCSIDriver, self).__init__(*args, **kwargs)
        self.array_info = []
        self.gateway_iscsi_ip_addresses_mga = []
        self.gateway_iscsi_ip_addresses_mgb = []
        self.stats = {}
        self.configuration.append_config_values(v6000_common.violin_opts)
        self.configuration.append_config_values(san.san_opts)
        self.common = v6000_common.V6000Common(self.configuration)
        LOG.info(_LI("Initialized driver %(name)s version: %(vers)s."),
                 {'name': self.__class__.__name__, 'vers': self.VERSION})

    def do_setup(self, context):
        """Any initialization the driver does while starting.

        Discovers the active iSCSI IPs on both gateways, builds the
        ``array_info`` connection table and creates the global target
        group used for all exports.
        """
        super(V6000ISCSIDriver, self).do_setup(context)
        self.common.do_setup(context)
        self.gateway_iscsi_ip_addresses_mga = self._get_active_iscsi_ips(
            self.common.mga)
        for ip in self.gateway_iscsi_ip_addresses_mga:
            self.array_info.append({"node": self._get_hostname('mga'),
                                    "addr": ip,
                                    "conn": self.common.mga})
        self.gateway_iscsi_ip_addresses_mgb = self._get_active_iscsi_ips(
            self.common.mgb)
        for ip in self.gateway_iscsi_ip_addresses_mgb:
            self.array_info.append({"node": self._get_hostname('mgb'),
                                    "addr": ip,
                                    "conn": self.common.mgb})
        # setup global target group for exports to use
        self._create_iscsi_target_group()

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        self.common.check_for_setup_error()
        # iSCSI must be enabled array-wide and at least one usable IP
        # must exist on each gateway, otherwise exports cannot work.
        bn = "/vshare/config/iscsi/enable"
        resp = self.common.vip.basic.get_node_values(bn)
        if resp[bn] is not True:
            raise exception.ViolinInvalidBackendConfig(
                reason=_('iSCSI is not enabled'))
        if len(self.gateway_iscsi_ip_addresses_mga) == 0:
            raise exception.ViolinInvalidBackendConfig(
                reason=_('no available iSCSI IPs on mga'))
        if len(self.gateway_iscsi_ip_addresses_mgb) == 0:
            raise exception.ViolinInvalidBackendConfig(
                reason=_('no available iSCSI IPs on mgb'))

    def create_volume(self, volume):
        """Creates a volume."""
        self.common._create_lun(volume)

    def delete_volume(self, volume):
        """Deletes a volume."""
        self.common._delete_lun(volume)

    def extend_volume(self, volume, new_size):
        """Extends an existing volume to new_size GB."""
        self.common._extend_lun(volume, new_size)

    def create_snapshot(self, snapshot):
        """Creates a snapshot from an existing volume."""
        self.common._create_lun_snapshot(snapshot)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""
        self.common._delete_lun_snapshot(snapshot)

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        The data is block-copied through the hypervisor via
        ``copy_volume_data`` rather than cloned on the array.
        """
        ctxt = context.get_admin_context()
        # copy_volume_data expects the source to carry its own size.
        snapshot['size'] = snapshot['volume']['size']
        self.common._create_lun(volume)
        self.copy_volume_data(ctxt, snapshot, volume)

    def create_cloned_volume(self, volume, src_vref):
        """Creates a full clone of the specified volume."""
        ctxt = context.get_admin_context()
        self.common._create_lun(volume)
        self.copy_volume_data(ctxt, src_vref, volume)

    def ensure_export(self, context, volume):
        """Synchronously checks and re-exports volumes at cinder start time."""
        pass

    def create_export(self, context, volume, connector):
        """Exports the volume."""
        # Exports are created lazily in initialize_connection instead.
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def initialize_connection(self, volume, connector):
        """Initializes the connection (target<-->initiator).

        Exports the LUN (or snapshot LUN) to the connecting initiator
        and returns the iSCSI connection properties for the randomly
        chosen gateway target.
        """
        igroup = None
        if self.configuration.use_igroups:
            #
            # Most drivers don't use igroups, because there are a
            # number of issues with multipathing and iscsi/fcp where
            # lun devices either aren't cleaned up properly or are
            # stale (from previous scans).
            #
            # If the customer really wants igroups for whatever
            # reason, we create a new igroup for each host/hypervisor.
            # Every lun that is exported to the particular
            # hypervisor/host will be contained in this igroup. This
            # should prevent other hosts from seeing luns they aren't
            # using when they perform scans.
            #
            igroup = self.common._get_igroup(volume, connector)
            self._add_igroup_member(connector, igroup)
        tgt = self._get_iscsi_target()
        target_name = self.TARGET_GROUP_NAME
        # ``volume`` may actually be a snapshot object; dispatch on type.
        if isinstance(volume, models.Volume):
            lun = self._export_lun(volume, connector, igroup)
        else:
            lun = self._export_snapshot(volume, connector, igroup)
        iqn = "%s%s:%s" % (self.configuration.iscsi_target_prefix,
                           tgt['node'], target_name)
        self.common.vip.basic.save_config()
        properties = {}
        properties['target_discovered'] = False
        properties['target_portal'] = '%s:%d' \
            % (tgt['addr'], self.configuration.iscsi_port)
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun
        properties['volume_id'] = volume['id']
        properties['auth_method'] = 'CHAP'
        properties['auth_username'] = ''
        properties['auth_password'] = ''
        return {'driver_volume_type': 'iscsi', 'data': properties}

    def terminate_connection(self, volume, connector, force=False, **kwargs):
        """Terminates the connection (target<-->initiator)."""
        if isinstance(volume, models.Volume):
            self._unexport_lun(volume)
        else:
            self._unexport_snapshot(volume)
        self.common.vip.basic.save_config()

    def get_volume_stats(self, refresh=False):
        """Get volume stats.

        Returns the cached stats unless a refresh is requested or no
        stats have been gathered yet.
        """
        if refresh or not self.stats:
            self._update_stats()
        return self.stats

    def _create_iscsi_target_group(self):
        """Creates a new target for use in exporting a lun.

        Create an HA target on the backend that will be used for all
        lun exports made via this driver.

        The equivalent CLI commands are "iscsi target create
        <target_name>" and "iscsi target bind <target_name> to
        <ip_of_mg_eth_intf>".
        """
        v = self.common.vip
        target_name = self.TARGET_GROUP_NAME
        # Idempotent: skip creation if the target group already exists.
        bn = "/vshare/config/iscsi/target/%s" % target_name
        resp = self.common.vip.basic.get_node_values(bn)
        if resp:
            LOG.debug("iscsi target group %s already exists.", target_name)
            return
        LOG.debug("Creating iscsi target %s.", target_name)
        try:
            self.common._send_cmd_and_verify(v.iscsi.create_iscsi_target,
                                             self._wait_for_target_state,
                                             '', [target_name], [target_name])
        except Exception:
            LOG.exception(_LE("Failed to create iscsi target!"))
            raise
        try:
            # Bind the target to every active iSCSI IP on both gateways.
            self.common._send_cmd(self.common.mga.iscsi.bind_ip_to_target,
                                  '', target_name,
                                  self.gateway_iscsi_ip_addresses_mga)
            self.common._send_cmd(self.common.mgb.iscsi.bind_ip_to_target,
                                  '', target_name,
                                  self.gateway_iscsi_ip_addresses_mgb)
        except Exception:
            LOG.exception(_LE("Failed to bind iSCSI targets!"))
            raise

    def _get_iscsi_target(self):
        """Get a random target IP for OpenStack to connect to.

        For the non-multipath case we pick a single random target for
        the OpenStack infrastructure to use.  This at least allows us
        to evenly distribute LUN connections across the storage
        cluster.
        """
        return self.array_info[random.randint(0, len(self.array_info) - 1)]

    @utils.synchronized('vmem-export')
    def _export_lun(self, volume, connector=None, igroup=None):
        """Generates the export configuration for the given volume.

        The equivalent CLI command is "lun export container
        <container_name> name <lun_name>"

        Arguments:
            volume    -- volume object provided by the Manager
            connector -- connector object provided by the Manager
            igroup    -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = -1
        export_to = ''
        v = self.common.vip
        # Prefer the igroup when configured; otherwise export directly
        # to the connecting initiator.
        if igroup:
            export_to = igroup
        elif connector:
            export_to = connector['initiator']
        else:
            raise exception.Error(_("No initiators found, cannot proceed"))
        target_name = self.TARGET_GROUP_NAME
        LOG.debug("Exporting lun %s.", volume['id'])
        try:
            self.common._send_cmd_and_verify(
                v.lun.export_lun, self.common._wait_for_export_state, '',
                [self.common.container, volume['id'], target_name,
                 export_to, 'auto'], [volume['id'], None, True])
        except Exception:
            LOG.exception(_LE("LUN export for %s failed!"), volume['id'])
            raise
        lun_id = self.common._get_lun_id(volume['id'])
        return lun_id

    @utils.synchronized('vmem-export')
    def _unexport_lun(self, volume):
        """Removes the export configuration for the given volume.

        The equivalent CLI command is "no lun export container
        <container_name> name <lun_name>"

        Arguments:
            volume -- volume object provided by the Manager
        """
        v = self.common.vip
        LOG.debug("Unexporting lun %s.", volume['id'])
        try:
            self.common._send_cmd_and_verify(
                v.lun.unexport_lun, self.common._wait_for_export_state, '',
                [self.common.container, volume['id'], 'all', 'all', 'auto'],
                [volume['id'], None, False])
        except exception.ViolinBackendErrNotFound:
            # Already gone on the backend: treat as success.
            LOG.debug("Lun %s already unexported, continuing.", volume['id'])
        except Exception:
            LOG.exception(_LE("LUN unexport for %s failed!"), volume['id'])
            raise

    @utils.synchronized('vmem-export')
    def _export_snapshot(self, snapshot, connector=None, igroup=None):
        """Generates the export configuration for the given snapshot.

        The equivalent CLI command is "snapshot export container
        PROD08 lun <snapshot_name> name <volume_name>"

        Arguments:
            snapshot  -- snapshot object provided by the Manager
            connector -- connector object provided by the Manager
            igroup    -- name of igroup to use for exporting

        Returns:
            lun_id -- the LUN ID assigned by the backend
        """
        lun_id = -1
        export_to = ''
        v = self.common.vip
        target_name = self.TARGET_GROUP_NAME
        LOG.debug("Exporting snapshot %s.", snapshot['id'])
        if igroup:
            export_to = igroup
        elif connector:
            export_to = connector['initiator']
        else:
            raise exception.Error(_("No initiators found, cannot proceed"))
        try:
            self.common._send_cmd(v.snapshot.export_lun_snapshot, '',
                                  self.common.container, snapshot['volume_id'],
                                  snapshot['id'], export_to, target_name,
                                  'auto')
        except Exception:
            LOG.exception(_LE("Snapshot export for %s failed!"),
                          snapshot['id'])
            raise
        else:
            # Poll until the array reports the export as present.
            self.common._wait_for_export_state(snapshot['volume_id'],
                                               snapshot['id'], state=True)
            lun_id = self.common._get_snapshot_id(snapshot['volume_id'],
                                                  snapshot['id'])
        return lun_id

    @utils.synchronized('vmem-export')
    def _unexport_snapshot(self, snapshot):
        """Removes the export configuration for the given snapshot.

        The equivalent CLI command is "no snapshot export container
        PROD08 lun <snapshot_name> name <volume_name>"

        Arguments:
            snapshot -- snapshot object provided by the Manager
        """
        v = self.common.vip
        LOG.debug("Unexporting snapshot %s.", snapshot['id'])
        try:
            self.common._send_cmd(v.snapshot.unexport_lun_snapshot, '',
                                  self.common.container, snapshot['volume_id'],
                                  snapshot['id'], 'all', 'all', 'auto', False)
        except Exception:
            LOG.exception(_LE("Snapshot unexport for %s failed!"),
                          snapshot['id'])
            raise
        else:
            # Poll until the array reports the export as removed.
            self.common._wait_for_export_state(snapshot['volume_id'],
                                               snapshot['id'], state=False)

    def _add_igroup_member(self, connector, igroup):
        """Add an initiator to an igroup so it can see exports.

        The equivalent CLI command is "igroup addto name <igroup_name>
        initiators <initiator_name>"

        Arguments:
            connector -- connector object provided by the Manager
            igroup    -- name of the igroup to add the initiator to
        """
        v = self.common.vip
        LOG.debug("Adding initiator %s to igroup.", connector['initiator'])
        resp = v.igroup.add_initiators(igroup, connector['initiator'])
        if resp['code'] != 0:
            raise exception.Error(
                _('Failed to add igroup member: %(code)d, %(message)s') % resp)

    def _update_stats(self):
        """Update array stats.

        Gathers array stats from the backend and converts them to GB values.
        Falls back to the previously reported value for a counter when the
        backend omits it from the response.
        """
        data = {}
        total_gb = 0
        free_gb = 0
        v = self.common.vip
        master_cluster_id = list(v.basic.get_node_values(
            '/cluster/state/master_id').values())[0]
        bn1 = "/vshare/state/global/%s/container/%s/total_bytes" \
            % (master_cluster_id, self.common.container)
        bn2 = "/vshare/state/global/%s/container/%s/free_bytes" \
            % (master_cluster_id, self.common.container)
        resp = v.basic.get_node_values([bn1, bn2])
        if bn1 in resp:
            total_gb = resp[bn1] // units.Gi
        else:
            LOG.warning(_LW("Failed to receive update for total_gb stat!"))
            if 'total_capacity_gb' in self.stats:
                total_gb = self.stats['total_capacity_gb']
        if bn2 in resp:
            free_gb = resp[bn2] // units.Gi
        else:
            LOG.warning(_LW("Failed to receive update for free_gb stat!"))
            if 'free_capacity_gb' in self.stats:
                free_gb = self.stats['free_capacity_gb']
        backend_name = self.configuration.volume_backend_name
        data['volume_backend_name'] = backend_name or self.__class__.__name__
        data['vendor_name'] = 'Violin Memory, Inc.'
        data['driver_version'] = self.VERSION
        data['storage_protocol'] = 'iSCSI'
        data['reserved_percentage'] = 0
        data['QoS_support'] = False
        data['total_capacity_gb'] = total_gb
        data['free_capacity_gb'] = free_gb
        for i in data:
            LOG.debug("stat update: %(name)s=%(data)s.",
                      {'name': i, 'data': data[i]})
        self.stats = data

    def _get_short_name(self, volume_name):
        """Creates a vSHARE-compatible iSCSI target name.

        The Folsom-style volume names are prefix(7) + uuid(36), which
        is too long for vSHARE for target names.  To keep things
        simple we can just truncate the name to 32 chars.

        Arguments:
            volume_name -- name of volume/lun

        Returns:
            Shortened volume name as a string.
        """
        return volume_name[:32]

    def _get_active_iscsi_ips(self, mg_conn):
        """Get a list of gateway IP addresses that can be used for iSCSI.

        Arguments:
            mg_conn -- active XG connection to one of the gateways

        Returns:
            active_gw_iscsi_ips -- list of IP addresses
        """
        active_gw_iscsi_ips = []
        # Management/loopback interfaces are never used for data traffic.
        interfaces_to_skip = ['lo', 'vlan10', 'eth1', 'eth2', 'eth3']
        bn = "/net/interface/config/*"
        intf_list = mg_conn.basic.get_node_values(bn)
        for i in intf_list:
            if intf_list[i] in interfaces_to_skip:
                continue
            bn1 = "/net/interface/state/%s/addr/ipv4/1/ip" % intf_list[i]
            bn2 = "/net/interface/state/%s/flags/link_up" % intf_list[i]
            resp = mg_conn.basic.get_node_values([bn1, bn2])
            # Keep the IP only when both nodes exist and the link is up.
            if len(resp.keys()) == 2 and resp[bn2] is True:
                active_gw_iscsi_ips.append(resp[bn1])
        return active_gw_iscsi_ips

    def _get_hostname(self, mg_to_query=None):
        """Get the hostname of one of the mgs (hostname is used in IQN).

        If the remote query fails then fall back to using the hostname
        provided in the cinder configuration file.

        Arguments:
            mg_to_query -- name of gateway to query 'mga' or 'mgb'

        Returns:
            hostname -- hostname as a string
        """
        hostname = self.configuration.san_ip
        conn = self.common.vip
        if mg_to_query == "mga":
            hostname = self.configuration.gateway_mga
            conn = self.common.mga
        elif mg_to_query == "mgb":
            hostname = self.configuration.gateway_mgb
            conn = self.common.mgb
        ret_dict = conn.basic.get_node_values("/system/hostname")
        if ret_dict:
            hostname = list(ret_dict.items())[0][1]
        else:
            LOG.debug("Unable to fetch gateway hostname for %s.", mg_to_query)
        return hostname

    def _wait_for_target_state(self, target_name):
        """Polls backend to verify an iscsi target configuration.

        This function will try to verify the creation of an iscsi
        target on both gateway nodes of the array every 5 seconds.

        Arguments:
            target_name -- name of iscsi target to be polled

        Returns:
            True if the target state was correctly added
        """
        bn = "/vshare/state/local/target/iscsi/%s" % (target_name)

        def _loop_func():
            # Completes only when BOTH gateways report the target node.
            status = [False, False]
            mg_conns = [self.common.mga, self.common.mgb]
            LOG.debug("Entering _wait_for_target_state loop: target=%s.",
                      target_name)
            for node_id in range(2):
                resp = mg_conns[node_id].basic.get_node_values(bn)
                if len(resp.keys()):
                    status[node_id] = True
            if status[0] and status[1]:
                raise loopingcall.LoopingCallDone(retvalue=True)

        timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
        success = timer.start(interval=5).wait()
        return success
| |
"""Record simulated nightly statistics by program.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import astropy.io.fits
import desiutil.log
import desisurvey.config
import desisurvey.utils
import desisurvey.tiles
import desisurvey.plots
class SurveyStatistics(object):
    """Collect nightly statistics by program.

    Parameters
    ----------
    start_date : datetime.date or None
        Record statistics for a survey that starts on the evening of this date.
        Uses the configured nominal start date when None.
    stop_date : datetime.date
        Record statistics for a survey that stops on the morning of this date.
        Uses the configured nominal stop date when None.
    restore : str or None
        Restore internal state from the snapshot saved to this filename,
        or initialize a new object when None. Use :meth:`save` to
        save a snapshot to be restored later. Filename is relative to
        the configured output path unless an absolute path is
        provided.
    """
    def __init__(self, start_date=None, stop_date=None, restore=None):
        self.tiles = desisurvey.tiles.Tiles()
        config = desisurvey.config.Configuration()
        if start_date is None:
            self.start_date = config.first_day()
        else:
            self.start_date = desisurvey.utils.get_date(start_date)
        if stop_date is None:
            self.stop_date = config.last_day()
        else:
            self.stop_date = desisurvey.utils.get_date(stop_date)
        self.num_nights = (self.stop_date - self.start_date).days
        if self.num_nights <= 0:
            raise ValueError('Expected start_date < stop_date.')
        # Build our internal record array with one row per night.
        # Use the builtin ``float`` for the dtypes: the ``np.float`` alias
        # was deprecated in numpy 1.20 and removed in numpy 1.24.
        dtype = []
        for name in 'MJD', 'tsched',:
            dtype.append((name, float))
        nprograms = len(self.tiles.programs)
        # Accumulated times per program (in days).
        for name in 'topen', 'tdead',:
            dtype.append((name, float, (nprograms,)))
        for name in 'tscience', 'tsetup', 'tsplit',:
            dtype.append((name, float, (nprograms,)))
        # Event counters per program.
        for name in 'completed', 'nexp', 'nsetup', 'nsplit', 'nsetup_abort', 'nsplit_abort',:
            dtype.append((name, np.int32, (nprograms,)))
        self._data = np.zeros(self.num_nights, dtype)
        if restore is not None:
            # Restore array contents from a FITS file.
            fullname = config.get_path(restore)
            with astropy.io.fits.open(fullname, memmap=None) as hdus:
                header = hdus[1].header
                comment = header['COMMENT']
                # Refuse snapshots built against a different tile file
                # or a different survey date range.
                if header['TILES'] != self.tiles.tiles_file:
                    raise ValueError('Header mismatch for TILES.')
                if header['START'] != self.start_date.isoformat():
                    raise ValueError('Header mismatch for START.')
                if header['STOP'] != self.stop_date.isoformat():
                    raise ValueError('Header mismatch for STOP.')
                self._data[:] = hdus['STATS'].data
            log = desiutil.log.get_logger()
            log.info('Restored stats from {}'.format(fullname))
            if comment:
                log.info(' Comment: "{}".'.format(comment))
        else:
            # Initialize local-noon MJD timestamp for each night.
            first_noon = desisurvey.utils.local_noon_on_date(self.start_date).mjd
            self._data['MJD'] = first_noon + np.arange(self.num_nights)
    def save(self, name='stats.fits', comment='', overwrite=True):
        """Save a snapshot of these statistics as a binary FITS table.

        The saved file size is ~800 Kb.

        Parameters
        ----------
        name : str
            File name to write. Will be located in the configuration
            output path unless it is an absolute path. Pass the same
            name to the constructor's ``restore`` argument to restore
            this snapshot.
        comment : str
            Comment to include in the saved header, for documentation
            purposes.
        overwrite : bool
            Silently overwrite any existing file when True.
        """
        hdus = astropy.io.fits.HDUList()
        header = astropy.io.fits.Header()
        header['TILES'] = self.tiles.tiles_file
        header['START'] = self.start_date.isoformat()
        header['STOP'] = self.stop_date.isoformat()
        header['COMMENT'] = comment
        header['EXTNAME'] = 'STATS'
        hdus.append(astropy.io.fits.PrimaryHDU())
        hdus.append(astropy.io.fits.BinTableHDU(self._data, header=header, name='STATS'))
        config = desisurvey.config.Configuration()
        fullname = config.get_path(name)
        hdus.writeto(fullname, overwrite=overwrite)
        log = desiutil.log.get_logger()
        log.info('Saved stats to {}'.format(fullname))
        if comment:
            log.info('Saved with comment "{}".'.format(header['COMMENT']))
    @property
    def nexp(self):
        """Total number of exposures recorded over all nights and programs."""
        return self._data['nexp'].sum()
    def get_night(self, night):
        """Return the (mutable) statistics record for one night.

        Parameters
        ----------
        night : date-like
            Anything accepted by :func:`desisurvey.utils.get_date`.

        Returns
        -------
        numpy record view into the internal per-night array; mutating
        it updates the stored statistics.
        """
        night = desisurvey.utils.get_date(night)
        assert night < self.stop_date
        idx = (night - self.start_date).days
        return self._data[idx]
    def validate(self):
        """Check the internal consistency of the accumulated statistics.

        Returns
        -------
        bool
            True when exposure counts and time accounting balance.
        """
        D = self._data
        # Every exposure must be preceded by a setup or split.
        if not np.all(D['nexp'] == D['nsplit'] + D['nsetup']):
            return False
        # Sum live time per night over programs.
        tlive = (D['topen'] - D['tdead']).sum(axis=1)
        # Sum time spent in each state per night over programs.
        ttotal = (D['tsetup'] + D['tscience'] + D['tsplit']).sum(axis=1)
        return np.allclose(tlive, ttotal)
    def summarize(self, nthday=None):
        """Print a tabular summary of the accumulated statistics to stdout.

        Parameters
        ----------
        nthday : int or None
            Restrict the summary to the first ``nthday`` nights, or
            summarize all nights when None.
        """
        assert self.validate()
        D = self._data
        if nthday is None:
            daysel = slice(None)
        else:
            daysel = D['MJD'] < np.min(D['MJD']) + nthday
        D = D[daysel]
        # Convert accumulated times from days to hours.
        tsched = 24 * D['tsched'].sum()
        topen = 24 * D['topen'].sum()
        tscience = 24 * D['tscience'].sum()
        # max(1e-6, ...) guards against division by zero.
        print('Scheduled {:.3f} hr Open {:.3f}% Live {:.3f}%'.format(
            tsched, 100 * topen / max(1e-6, tsched), 100 * tscience / max(1e-6, topen)))
        print('=' * 82)
        print('PROG TILES NEXP SETUP ABT SPLIT ABT TEXP TSETUP TSPLIT TOPEN TDEAD')
        print('=' * 82)
        # Summarize by program.  (Removed a block of accumulator locals
        # here that were assigned but never read.)
        for program in self.tiles.programs:
            progidx = self.tiles.program_index[program]
            sel = progidx
            ntiles = np.sum(self.tiles.program_mask[program])
            ndone = D['completed'][:, sel].sum()
            nexp = D['nexp'][:, sel].sum()
            nsetup = D['nsetup'][:, sel].sum()
            nsplit = D['nsplit'][:, sel].sum()
            nsetup_abort = D['nsetup_abort'][:, sel].sum()
            nsplit_abort = D['nsplit_abort'][:, sel].sum()
            # Mean time per completed tile, converted from days to seconds.
            tscience = 86400 * D['tscience'][:, sel].sum() / max(1, ndone)
            tsetup = 86400 * D['tsetup'][:, sel].sum() / max(1, ndone)
            tsplit = 86400 * D['tsplit'][:, sel].sum() / max(1, ndone)
            line = '{:6s} {} {:4d}/{:4d} {:5d} {:5d} {:3d} {:5d} {:3d} {:6.1f}s {:5.1f}s {:5.1f}s'.format(
                program, ' ', ndone, ntiles, nexp, nsetup, nsetup_abort, nsplit, nsplit_abort, tscience, tsetup, tsplit)
            print(line)
    def plot(self, forecast=None):
        """Plot a summary of the survey statistics.

        Requires that matplotlib is installed.

        Parameters
        ----------
        forecast : object or None
            When provided, overlay forecast progress curves and
            predicted per-tile overheads.

        Returns
        -------
        tuple
            (figure, axes) matplotlib objects.
        """
        import matplotlib.pyplot as plt
        assert self.validate()
        D = self._data
        nprograms = len(self.tiles.programs)
        # Find the last day of the survey with any completed tiles.
        last = np.argmax(np.cumsum(D['completed'].sum(axis=1))) + 1
        tsetup = np.zeros((last, nprograms))
        tsplit = np.zeros((last, nprograms))
        ntiles = np.zeros(nprograms, int)
        for program in self.tiles.programs:
            progidx = self.tiles.program_index[program]
            tsetup[:, progidx] += D['tsetup'][:last, progidx]
            tsplit[:, progidx] += D['tsplit'][:last, progidx]
            ntiles[progidx] += np.sum(self.tiles.program_mask[program])
        actual = np.cumsum(D['completed'], axis=0)
        dt = 1 + np.arange(len(D))
        fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 10))
        # Top panel: completion percentage per program.
        ax = axes[0]
        for program in self.tiles.programs:
            programidx = self.tiles.program_index[program]
            color = desisurvey.plots.program_color[program]
            nprogram = np.sum(self.tiles.program_mask[program])
            if forecast:
                ax.plot(dt, 100 * forecast.program_progress[program] / nprogram, ':', c=color, lw=1)
            ax.plot(dt[:last], 100 * actual[:last, programidx] / nprogram,
                    lw=3, alpha=0.5, c=color, label=program)
        if forecast:
            ax.plot([], [], 'b:', lw=1, label='forecast')
        ax.legend(ncol=1)
        ax.axvline(dt[last-1], ls='-', c='r')
        ax.set_ylim(0, 100)
        ax.set_ylabel('Completed [%]')
        yaxis = ax.yaxis
        yaxis.tick_right()
        yaxis.set_label_position('right')
        # Bottom panel: cumulative per-tile overheads by program.
        ax = axes[1]
        for program in self.tiles.programs:
            progidx = self.tiles.program_index[program]
            c = desisurvey.plots.program_color.get(program, 'purple')
            scale = 86400 / ntiles[progidx]  # secs / tile
            ax.plot(dt[:last], scale * np.cumsum(tsetup[:, progidx]), '-', c=c)
            ax.plot(dt[:last], scale * np.cumsum(tsplit[:, progidx]), '--', c=c)
            ax.plot(dt[:last], scale * np.cumsum(D['tdead'][:last, progidx]), ':', c=c)
            if forecast:
                # Use program_index for consistency: this was the only
                # place that referenced a PROGRAM_INDEX attribute, which
                # is not used anywhere else in this module.
                row = forecast.df.iloc[self.tiles.program_index[program]]
                ax.scatter([dt[-1], dt[-1], dt[-1]], [
                    row['Setup overhead / tile (s)'],
                    row['Cosmic split overhead / tile (s)'],
                    row['Operations overhead / tile (s)']], s=50, lw=0, c=c)
        ax.plot([], [], 'b-', label='setup')
        ax.plot([], [], 'b--', label='split')
        ax.plot([], [], 'b:', label='dead')
        for program in self.tiles.programs:
            ax.plot([], [], '-', c=desisurvey.plots.program_color[program], label=program)
        ax.legend(ncol=2)
        ax.axvline(dt[last-1], ls='-', c='r')
        ax.set_xlabel('Elapsed Days')
        ax.set_ylabel('Overhead / Tile [s]')
        ax.set_xlim(0, dt[-1] + 1)
        ax.set_ylim(0, None)
        yaxis = ax.yaxis
        yaxis.set_minor_locator(plt.MultipleLocator(10))
        yaxis.tick_right()
        yaxis.set_label_position('right')
        plt.subplots_adjust(hspace=0.05)
        return fig, axes
def plot_one_night(exps, tiledata, night, startdate, center_l=180):
    """Plot the tiles observed on one night, one panel per program.

    Parameters
    ----------
    exps : array-like
        Exposure records with at least 'TILEID' and 'MJD' columns.
    tiledata : array-like
        Per-tile records with a 'PLANNED' column giving the night number
        a tile was planned (negative means never planned).
    night : int
        Night to plot, as an integer MJD.
    startdate : date-like
        Survey start date; used to convert PLANNED night numbers to MJD.
    center_l : float
        Central longitude (degrees) for wrapping RA onto the plot.
    """
    import ephem
    from astropy import units as u
    from astropy.coordinates import SkyCoord, search_around_sky
    from matplotlib import pyplot as p
    # Night number of the requested night, counted from the survey start.
    startmjd = int(desisurvey.utils.local_noon_on_date(
        desisurvey.utils.get_date(startdate)).mjd)
    nightnum = night - startmjd
    # Tiles that were planned on or before this night.
    mstarted = (tiledata['PLANNED'] <= nightnum) & (tiledata['PLANNED'] >= 0)
    tiles = desisurvey.tiles.get_tiles()
    p.clf()
    p.subplots_adjust(hspace=0)
    p.subplots_adjust(left=0.1, right=0.9)
    programs = ['DARK', 'BRIGHT']
    expindex = tiles.index(exps['TILEID'])
    # Truncating MJD to an integer identifies the night of each exposure.
    expnight = exps['MJD'].astype('i4')
    m = expnight == night
    medianmjd = np.median(exps['MJD'][m])
    mayall = ephem.Observer()
    config = desisurvey.config.Configuration()
    coord = SkyCoord(ra=tiles.tileRA*u.deg, dec=tiles.tileDEC*u.deg)
    mayall.lon = config.location.longitude().to(u.radian).value
    mayall.lat = config.location.latitude().to(u.radian).value
    # Convert MJD to the epoch ephem expects (Dublin JD, epoch 1899-12-31
    # noon): MJD + 2400000.5 gives JD, minus 2415020 gives Dublin JD.
    mayall.date = medianmjd+(2400000.5-2415020)
    moon = ephem.Moon()
    moon.compute(mayall)
    tile_diameter = config.tile_radius()*2
    for i, prog in enumerate(programs):
        mprog = prog == tiles.tileprogram
        mprogstarted = mstarted & mprog
        p.subplot(len(programs), 1, i+1)
        # Wrap RA so the plot is centered on center_l.
        ra = ((tiles.tileRA - (center_l-180)) % 360)+(center_l-180)
        # All tiles in this program (gray), started tiles (green).
        p.plot(ra[mprog], tiles.tileDEC[mprog], '.', color='gray',
               markersize=1)
        p.plot(ra[mprogstarted], tiles.tileDEC[mprogstarted], '.',
               color='green', markersize=5)
        # Tonight's exposures for this program, connected in time order.
        m = (expnight == night) & (tiles.tileprogram[expindex] == prog)
        p.plot(ra[expindex[m]], tiles.tileDEC[expindex[m]], 'r-+')
        # Report the closest pair of distinct tiles observed tonight.
        idx1, idx2, sep2d, dist3d = search_around_sky(
            coord[expindex[m]], coord[expindex[m]], tile_diameter*10)
        mdiff = expindex[m][idx1] != expindex[m][idx2]
        if np.sum(mdiff) > 0:
            print(f'min separation {prog}: {np.min(sep2d[mdiff])}')
        p.gca().set_aspect('equal')
        # Mark the moon position (wrapped to the same RA convention).
        p.plot(((np.degrees(moon.ra)-(center_l-180)) % 360)+(center_l-180),
               np.degrees(moon.dec), 'o',
               color='yellow', markersize=10,
               markeredgecolor='black')
| |
'''
Created on Apr 3, 2017
@author: dearj019
'''
import os
import errno
from .imdb import imdb
import xml.dom.minidom as minidom
import numpy as np
import scipy.sparse
import scipy.io as sio
import pickle as cPickle
import subprocess
import uuid
from .generic_eval import generic_eval
import xml.etree.ElementTree as ET
_DEBUG = False
class generic(imdb):
    """Generic detection dataset stored in a PASCAL-VOC-style layout.

    Expects the following structure under ``devkit_path``:
      Images/       image files (.jpg or .png)
      ImageSets/    <image_set>.txt listing image indices, one per line
      Annotations/  <index>.xml PASCAL-style annotation files
    """
    def __init__(self, image_set, devkit_path, classes):
        imdb.__init__(self, image_set)
        self._image_set = image_set
        self._devkit_path = devkit_path
        self._data_path = os.path.join(self._devkit_path)
        # Class 0 is always the background class.
        self._classes = ('__background__',) + tuple(classes)
        self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
        self._image_ext = ['.jpg', '.png']
        self._image_index = self._load_image_set_index()
        # Random salt keeps result files from concurrent runs distinct.
        self._salt = str(uuid.uuid4())
        self._comp_id = 'comp4'
        self._roidb_handler = self.gt_roidb
        # Specific config options
        self.config = {'cleanup' : True,
                       'use_salt' : True,
                       'top_k' : 2000,
                       'use_diff' : False,
                       'rpn_file' : None}
        assert os.path.exists(self._devkit_path), \
                'Devkit path does not exist: {}'.format(self._devkit_path)
        assert os.path.exists(self._data_path), \
                'Path does not exist: {}'.format(self._data_path)
    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(self._image_index[i])
    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.

        Tries each supported extension in turn and returns the first
        path that exists.
        """
        for ext in self._image_ext:
            image_path = os.path.join(self._data_path, 'Images',
                                      index + ext)
            if os.path.exists(image_path):
                break
        assert os.path.exists(image_path), \
                'Path does not exist: {}'.format(image_path)
        return image_path
    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        # Example path to image set file:
        # self._data_path + /ImageSets/val.txt
        image_set_file = os.path.join(self._data_path, 'ImageSets',
                                      self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
                'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        return image_index
    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.

        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                roidb = cPickle.load(fid)
            print(('{} gt roidb loaded from {}'.format(self.name, cache_file)))
            return roidb
        gt_roidb = [self._load_hands_annotation(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
        print(('wrote gt roidb to {}'.format(cache_file)))
        return gt_roidb
    def rpn_roidb(self):
        """Return ground-truth ROIs merged with RPN proposals."""
        gt_roidb = self.gt_roidb()
        rpn_roidb = self._load_rpn_roidb(gt_roidb)
        roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
        return roidb
    def _load_rpn_roidb(self, gt_roidb):
        """Load the pickled RPN proposal boxes named in the config."""
        filename = self.config['rpn_file']
        print(('loading {}'.format(filename)))
        assert os.path.exists(filename), \
               'rpn data not found at: {}'.format(filename)
        with open(filename, 'rb') as f:
            box_list = cPickle.load(f)
        return self.create_roidb_from_box_list(box_list, gt_roidb)
    def _load_hands_annotation(self, index):
        """
        Load image and bounding boxes info from the PASCAL-style XML
        annotation file for one image index.
        """
        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        root = tree.getroot()
        objs = []
        for element in root.iter('object'):
            name = element.find('name').text
            if name in self._classes:
                bndbox = element.find('bndbox')
                xmin = bndbox.find("xmin").text
                ymin = bndbox.find("ymin").text
                xmax = bndbox.find("xmax").text
                ymax = bndbox.find("ymax").text
                xmin_i = int(float(xmin))
                ymin_i = int(float(ymin))
                # Clamp coordinates that touch the image border; log the
                # file so bad annotations can be inspected.
                if xmin_i < 2:
                    xmin = '2'
                    xmin_i = 2
                    print(filename)
                if ymin_i < 2:
                    ymin = '2'
                    ymin_i = 2
                    print(filename)
                objs.append([xmin, ymin, xmax, ymax, name])
        num_objs = len(objs)
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # "Seg" area here is just the box area
        seg_areas = np.zeros((num_objs), dtype=np.float32)
        # Load object bounding boxes into a data frame.
        for ix, coor in enumerate(objs):
            x1 = float(coor[0])
            y1 = float(coor[1])
            x2 = float(coor[2])
            y2 = float(coor[3])
            cls = self._class_to_ind[coor[4]]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
        overlaps = scipy.sparse.csr_matrix(overlaps)
        if _DEBUG:
            print(boxes)
        return {'boxes' : boxes,
                'gt_classes': gt_classes,
                'gt_overlaps' : overlaps,
                'flipped' : False,
                'seg_areas' : seg_areas}
    def _write_hands_results_file(self, all_boxes):
        """Write per-class detection results in VOC text format."""
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print(('Writing {} results file'.format(cls)))
            filename = self._get_hands_results_file_template().format(cls)
            with open(filename, 'wt') as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    # dets may be a list or an ndarray; comparing an
                    # ndarray with == [] is ambiguous, so test length.
                    if len(dets) == 0:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in range(dets.shape[0]):
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
                                format(index, dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))
    def evaluate_detections(self, all_boxes, output_dir):
        """Write results, run the python evaluation, optionally clean up."""
        self._write_hands_results_file(all_boxes)
        self._do_python_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_hands_results_file_template().format(cls)
                os.remove(filename)
    def _get_comp_id(self):
        """Return the competition id, salted when configured."""
        comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
                   else self._comp_id)
        return comp_id
    def _get_hands_results_file_template(self):
        # INRIAdevkit/results/comp4-44503_det_test_{%s}.txt
        filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
        try:
            os.mkdir(self._devkit_path + '/results')
        except OSError as e:
            # An already-existing results directory is fine.
            if e.errno == errno.EEXIST:
                pass
            else:
                raise e
        path = os.path.join(
            self._devkit_path,
            'results',
            filename)
        return path
    def _do_python_eval(self, output_dir = 'output'):
        """Evaluate written result files and print per-class AP."""
        annopath = os.path.join(
            self._data_path,
            'Annotations',
            '{:s}.txt')
        imagesetfile = os.path.join(
            self._data_path,
            'ImageSets',
            self._image_set + '.txt')
        cachedir = os.path.join(self._devkit_path, 'annotations_cache')
        aps = []
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            if cls == '__background__':
                continue
            filename = self._get_hands_results_file_template().format(cls)
            rec, prec, ap = generic_eval(
                filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5)
            aps += [ap]
            print(('AP for {} = {:.4f}'.format(cls, ap)))
            # Pickle requires a binary-mode file on Python 3 ('w' raises
            # TypeError when pickle writes bytes).
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
        print(('Mean AP = {:.4f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print(('{:.3f}'.format(ap)))
        print(('{:.3f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
| |
# Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for perfkitbenchmarker.packages.ycsb"""
import copy
import os
import unittest
from perfkitbenchmarker.linux_packages import ycsb
class SimpleResultParserTestCase(unittest.TestCase):
  """Checks ycsb.ParseResults against a canned single-run YCSB log."""

  maxDiff = None

  def setUp(self):
    data_path = os.path.join(os.path.dirname(__file__), '..', 'data',
                             'ycsb-test-run.dat')
    with open(data_path) as fp:
      self.contents = fp.read()
    self.results = ycsb.ParseResults(self.contents, 'histogram')

  def testCommandLineSet(self):
    expected = ('Command line: -db com.yahoo.ycsb.BasicDB '
                '-P workloads/workloada -t')
    self.assertEqual(expected, self.results['command_line'])

  def testClientSet(self):
    self.assertEqual('YCSB Client 0.1', self.results['client'])

  def testUpdateStatisticsParsed(self):
    expected = {
        'group': 'update',
        'statistics': {
            'Operations': 531,
            'Return=0': 531,
            'AverageLatency(ms)': 0.0659774011299435,
            'MinLatency(ms)': 0.042,
            'MaxLatency(ms)': 0.345,
            '95thPercentileLatency(ms)': 0,
            '99thPercentileLatency(ms)': 0
        },
        'histogram': [(0, 530), (19, 1)],
    }
    self.assertDictEqual(expected, dict(self.results['groups']['update']))

  def testReadStatisticsParsed(self):
    expected = {
        'group': 'read',
        'statistics': {
            'Operations': 469,
            'Return=0': 469,
            'AverageLatency(ms)': 0.03847761194029851,
            'MinLatency(ms)': 0.034,
            'MaxLatency(ms)': 0.102,
            '95thPercentileLatency(ms)': 0,
            '99thPercentileLatency(ms)': 0
        },
        'histogram': [(0, 469)],
    }
    self.assertDictEqual(expected, dict(self.results['groups']['read']))

  def testOverallStatisticsParsed(self):
    expected = {
        'statistics': {
            'RunTime(ms)': 80.0,
            'Throughput(ops/sec)': 12500.0
        },
        'group': 'overall',
        'histogram': []
    }
    self.assertDictEqual(expected, self.results['groups']['overall'])
class DetailedResultParserTestCase(unittest.TestCase):
  """Checks percentile extraction from parsed YCSB histograms."""

  def setUp(self):
    data_path = os.path.join(os.path.dirname(__file__), '..', 'data',
                             'ycsb-test-run-2.dat')
    with open(data_path) as fp:
      self.contents = fp.read()
    self.results = ycsb.ParseResults(self.contents)

  def _check_percentiles(self, group):
    # Both fixture groups share the same p50/p99 values.
    hist = self.results['groups'][group]['histogram']
    percentiles = ycsb._PercentilesFromHistogram(hist)
    self.assertEqual(1, percentiles['p50'])
    self.assertEqual(7, percentiles['p99'])

  def testPercentilesFromHistogram_read(self):
    self._check_percentiles('read')

  def testPercentilesFromHistogram_update(self):
    self._check_percentiles('update')
class WeightedQuantileTestCase(unittest.TestCase):
  """Tests for ycsb._WeightedQuantile."""

  def testEvenlyWeightedSamples(self):
    x = range(1, 101)  # 1-100
    weights = [1 for _ in x]
    self.assertEqual(50, ycsb._WeightedQuantile(x, weights, 0.50))
    self.assertEqual(75, ycsb._WeightedQuantile(x, weights, 0.75))
    self.assertEqual(90, ycsb._WeightedQuantile(x, weights, 0.90))
    self.assertEqual(95, ycsb._WeightedQuantile(x, weights, 0.95))
    self.assertEqual(99, ycsb._WeightedQuantile(x, weights, 0.99))
    self.assertEqual(100, ycsb._WeightedQuantile(x, weights, 1))

  def testLowWeight(self):
    x = [1, 4]
    weights = [99, 1]
    # range() here: xrange does not exist on Python 3 and would raise
    # NameError at runtime.
    for i in range(100):
      self.assertEqual(1, ycsb._WeightedQuantile(x, weights, i / 100.0))
    self.assertEqual(4, ycsb._WeightedQuantile(x, weights, 0.995))

  def testMidWeight(self):
    x = [0, 1.2, 4]
    weights = [1, 98, 1]
    for i in range(2, 99):
      self.assertAlmostEqual(1.2, ycsb._WeightedQuantile(x, weights, i / 100.0))
    self.assertEqual(4, ycsb._WeightedQuantile(x, weights, 0.995))
class ParseWorkloadTestCase(unittest.TestCase):
  """Tests for ycsb._ParseWorkload."""

  def testParsesEmptyString(self):
    self.assertDictEqual({}, ycsb._ParseWorkload(''))

  def testIgnoresComment(self):
    # Comment-only inputs produce an empty mapping.
    for text in ('#\n', '#recordcount = 10\n# columnfamily=cf'):
      self.assertDictEqual({}, ycsb._ParseWorkload(text))
    # A comment followed by a real assignment keeps the assignment.
    self.assertDictEqual({'recordcount': '10'},
                         ycsb._ParseWorkload('#Sample!\nrecordcount = 10'))

  def testParsesSampleWorkload(self):
    test_file_path = os.path.join(os.path.dirname(__file__), '..', 'data',
                                  'ycsb_workloada')
    with open(test_file_path) as fp:
      actual = ycsb._ParseWorkload(fp.read())
    expected = {
        'recordcount': '1000',
        'operationcount': '1000',
        'workload': 'com.yahoo.ycsb.workloads.CoreWorkload',
        'readallfields': 'true',
        'readproportion': '0.5',
        'updateproportion': '0.5',
        'scanproportion': '0',
        'insertproportion': '0',
        'requestdistribution': 'zipfian'
    }
    self.assertDictEqual(expected, actual)
class CombineResultsTestCase(unittest.TestCase):
  """Tests for ycsb._CombineResults."""

  def testGroupMissing(self):
    r1 = {
        'client': '',
        'command_line': '',
        'groups': {
            'read': {
                'group': 'read',
                'statistics': {'Operations': 100,
                               'Return=0': 100},
                'histogram': []
            }
        }
    }
    r2 = {
        'client': '',
        'command_line': '',
        'groups': {
            'read': {
                'group': 'read',
                'statistics': {'Operations': 96, 'Return=0': 94,
                               'Return=-1': 2},
                'histogram': []
            },
            'update': {
                'group': 'update',
                'statistics': {'Operations': 100,
                               'AverageLatency(ms)': 25},
                'histogram': []
            }
        }
    }
    combined = ycsb._CombineResults([r1, r2])
    # assertCountEqual: assertItemsEqual was removed from unittest in
    # Python 3 and would raise AttributeError.
    self.assertCountEqual(['read', 'update'], combined['groups'])
    self.assertCountEqual(['Operations', 'Return=0', 'Return=-1'],
                          combined['groups']['read']['statistics'])
    read_stats = combined['groups']['read']['statistics']
    self.assertEqual({'Operations': 196, 'Return=0': 194, 'Return=-1': 2},
                     read_stats)

  def testDropUnaggregatedFromSingleResult(self):
    r = {
        'client': '',
        'command_line': '',
        'groups': {
            'read': {
                'group': 'read',
                'statistics': {'AverageLatency(ms)': 21},
                'histogram': []
            }
        }
    }
    r_copy = copy.deepcopy(r)
    self.assertEqual(r, r_copy)
    combined = ycsb._CombineResults([r])
    # _CombineResults must not mutate its input.
    self.assertEqual(r, r_copy)
    # Non-aggregatable statistics are dropped even for a single result.
    r['groups']['read']['statistics'] = {}
    self.assertEqual(r, combined)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Project Class."""
from importlib import import_module
from pymodm import fields, EmbeddedMongoModel
from pymodm.errors import ValidationError
import empower_core.serialize as serialize
from empower_core.envmanager.env import Env
from empower_core.etheraddress import EtherAddress
from empower_core.acl import ACL
from empower_core.plmnid import PLMNIDField
from empower_core.ssid import SSIDField
from empower_core.launcher import srv_or_die
from empower_core.serialize import serializable_dict
from empower_core.app import EApp
from empower.managers.ranmanager.lvapp.wifislice import WiFiSlice
from empower.managers.ranmanager.vbsp.lteslice import LTESlice
# Supported BSSID allocation policies for Wi-Fi networks.
T_BSSID_TYPE_SHARED = "shared"
T_BSSID_TYPE_UNIQUE = "unique"
T_BSSID_TYPE_TYPES = [T_BSSID_TYPE_SHARED, T_BSSID_TYPE_UNIQUE]
# Wi-Fi station scheduler identifiers.
T_STA_SCHED_RR = 0
T_STA_SCHED_DRR = 1
# NOTE(review): ADRR shares value 1 with DRR, making the two schedulers
# indistinguishable at runtime -- possibly meant to be 2; confirm upstream.
T_STA_SCHED_ADRR = 1
T_STA_SCHED_TYPES = [T_STA_SCHED_RR, T_STA_SCHED_DRR, T_STA_SCHED_ADRR]
# LTE UE scheduler identifiers.
T_UE_SCHED_RR = 0
T_UE_SCHED_TYPES = [T_UE_SCHED_RR]
class ACLDictField(fields.DictField):
    """Dict field mapping MAC address strings to ACL entries."""

    def to_mongo(self, value):
        """Serialize the ACL dict for storage."""
        try:
            return serialize.serialize(value)
        except ValueError as ex:
            raise ValidationError(ex)

    def to_python(self, value):
        """Deserialize stored data into a dict of ACL objects keyed by address."""
        try:
            entries = (item if isinstance(item, ACL) else ACL(**item)
                       for item in value.values())
            return {str(entry.addr): entry for entry in entries}
        except ValueError as ex:
            raise ValidationError(ex)
class WiFiSlicesDictField(fields.DictField):
    """Dict field mapping slice-id strings to WiFiSlice objects."""

    def to_mongo(self, value):
        """Serialize the Wi-Fi slice dict for storage."""
        try:
            return serialize.serialize(value)
        except ValueError as ex:
            raise ValidationError(ex)

    def to_python(self, value):
        """Deserialize stored data into a dict of WiFiSlice objects."""
        try:
            slices = (item if isinstance(item, WiFiSlice) else WiFiSlice(**item)
                      for item in value.values())
            return {str(entry.slice_id): entry for entry in slices}
        except ValueError as ex:
            raise ValidationError(ex)
class LTESlicesDictField(fields.DictField):
    """Dict field mapping slice-id strings to LTESlice objects."""

    def to_mongo(self, value):
        """Serialize the LTE slice dict for storage."""
        try:
            return serialize.serialize(value)
        except ValueError as ex:
            raise ValidationError(ex)

    def to_python(self, value):
        """Deserialize stored data into a dict of LTESlice objects."""
        try:
            slices = (item if isinstance(item, LTESlice) else LTESlice(**item)
                      for item in value.values())
            return {str(entry.slice_id): entry for entry in slices}
        except ValueError as ex:
            raise ValidationError(ex)
class EmbeddedWiFiProps(EmbeddedMongoModel):
    """Embedded Wi-Fi Properties."""

    ssid = SSIDField(required=True)
    bssid_type = fields.CharField(required=False,
                                  choices=T_BSSID_TYPE_TYPES,
                                  default=T_BSSID_TYPE_UNIQUE)
    allowed = ACLDictField(required=False, blank=True)

    def to_dict(self):
        """Return a JSON-serializable dictionary of the Wi-Fi properties."""
        return {
            'ssid': self.ssid,
            'bssid_type': self.bssid_type,
            'allowed': self.allowed,
        }
class EmbeddedLTEProps(EmbeddedMongoModel):
    """Embedded LTE Properties."""

    plmnid = PLMNIDField(required=True)

    def to_dict(self):
        """Return a JSON-serializable dictionary of the LTE properties."""
        return {'plmnid': self.plmnid}
@serializable_dict
class EmpowerProject(Env):
"""Empower Project class.
Attributes:
owner: The username of the user that requested this pool
wifi_props: The Wi-Fi properties
lte_props: The LTE properties
wifi_slices: The definition of the Wi-Fi slices
lte_slices: The definition of the Wi-Fi slices
The Wi-Fi properties are defined starting from a JSON document like the
following:
{
"ssid": "EmPOWER",
"allowed": {
"04:46:65:49:e0:1f": {
"addr": "04:46:65:49:e0:1f",
"desc": "Some laptop"
},
"04:46:65:49:e0:11": {
"addr": "04:46:65:49:e0:1f",
"desc": "Some other laptop"
},
"04:46:65:49:e0:12": {
"addr": "04:46:65:49:e0:1f",
"desc": "Yet another laptop"
}
}
"bssid_type": "unique"
}
The LTE properties are defined starting from a JSON document like the
following:
{
"plmnid": "00101"
}
A Wi-Fi slice is defined starting from a JSON document like the
following:
{
"slice_id": "0x42",
"properties": {
"amsdu_aggregation": true,
"quantum": 12000,
"sta_scheduler": 1
}
}
The descriptor above will create a slice with id 0x42 on every WTP.
In some cases it may be required to use different slice parameters only on
certain WTPs. This can be done using a descriptor like the following:
{
"slice_id": "0x42",
"properties": {
"amsdu_aggregation": true,
"quantum": 12000,
"sta_scheduler": 1
}
"devices": {
"00:0D:B9:2F:56:64": {
"quantum": 15000
}
}
}
In this case the slice is still created on all the WTPs in the network,
but some slice parameters are different for the specified nodes.
Similarly, an LTE slice is defined starting from a JSON document like the
following:
{
"slice_id": "0x42",
"properties": {
"rbgs": 5,
"ue_scheduler": 1
},
"devices": {
"aa:bb:cc:dd:ee:ff": {
rbgs": 2
}
}
}
"""
owner = fields.CharField(required=True)
desc = fields.CharField(required=True)
wifi_props = fields.EmbeddedDocumentField(EmbeddedWiFiProps)
lte_props = fields.EmbeddedDocumentField(EmbeddedLTEProps)
wifi_slices = WiFiSlicesDictField(required=False, blank=True)
lte_slices = LTESlicesDictField(required=False, blank=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Save pointer to ProjectManager
self.manager = srv_or_die("projectsmanager")
@property
def vbses(self):
"""Return the VBSes."""
return srv_or_die("vbspmanager").devices
@property
def wtps(self):
"""Return the WTPs."""
return srv_or_die("lvappmanager").devices
@property
def users(self):
"""Return the UEs."""
if not self.lte_props:
return {}
users = {k: v for k, v in srv_or_die("vbspmanager").users.items()
if v.plmnid == self.lte_props.plmnid}
return users
@property
def lvaps(self):
"""Return the LVAPs."""
if not self.wifi_props:
return {}
lvaps = {k: v for k, v in srv_or_die("lvappmanager").lvaps.items()
if v.ssid == self.wifi_props.ssid}
return lvaps
@property
def vaps(self):
"""Return the VAPs."""
if not self.wifi_props:
return {}
vaps = {k: v for k, v in srv_or_die("lvappmanager").vaps.items()
if v.ssid == self.wifi_props.ssid}
return vaps
def load_service(self, service_id, name, params):
"""Load a service instance."""
init_method = getattr(import_module(name), "launch")
service = init_method(context=self, service_id=service_id, **params)
if not isinstance(service, EApp):
raise ValueError("Service %s not EApp type" % name)
return service
def upsert_acl(self, addr, desc):
"""Upsert ACL."""
acl = ACL(addr=addr, desc=desc)
self.wifi_props.allowed[str(acl.addr)] = acl
self.save()
return acl
def remove_acl(self, addr=None):
"""Upsert new slice."""
if addr:
del self.wifi_props.allowed[str(addr)]
else:
for k in list(self.wifi_props.allowed.keys()):
del self.wifi_props.allowed[k]
self.save()
def upsert_wifi_slice(self, **kwargs):
"""Upsert new slice."""
slc = WiFiSlice(**kwargs)
for wtp in self.wtps.values():
for block in wtp.blocks.values():
wtp.connection.send_set_slice(self, slc, block)
self.wifi_slices[str(slc.slice_id)] = slc
self.save()
self.refresh_from_db()
return slc.slice_id
    def upsert_lte_slice(self, **kwargs):
        """Create or update an LTE slice and push it to every VBS cell."""
        slc = LTESlice(**kwargs)
        for vbs in self.vbses.values():
            for cell in vbs.cells.values():
                vbs.connection.send_set_slice(self, slc, cell)
        self.lte_slices[str(slc.slice_id)] = slc
        self.save()
        # Re-read so in-memory state matches what was actually stored.
        self.refresh_from_db()
        return slc.slice_id
    def delete_wifi_slice(self, slice_id):
        """Delete a Wi-Fi slice and withdraw it from every WTP block.

        Raises:
            ValueError: when attempting to delete the default slice ("0").
            KeyError: if *slice_id* is unknown.
        """
        if slice_id == "0":
            raise ValueError("Slice 0 cannot be deleted")
        slc = self.wifi_slices[slice_id]
        for wtp in self.wtps.values():
            for block in wtp.blocks.values():
                wtp.connection.send_del_slice(self, slc.slice_id, block)
        del self.wifi_slices[slice_id]
        self.save()
        self.refresh_from_db()
    def delete_lte_slice(self, slice_id):
        """Delete an LTE slice and withdraw it from every VBS cell.

        Raises:
            ValueError: when attempting to delete the default slice ("0").
            KeyError: if *slice_id* is unknown.
        """
        if slice_id == "0":
            raise ValueError("Slice 0 cannot be deleted")
        slc = self.lte_slices[slice_id]
        for vbs in self.vbses.values():
            for cell in vbs.cells.values():
                vbs.connection.send_del_slice(self, slc.slice_id, cell)
        del self.lte_slices[slice_id]
        self.save()
        self.refresh_from_db()
def to_dict(self):
"""Return JSON-serializable representation of the object."""
output = super().to_dict()
output['owner'] = self.owner
output['desc'] = self.desc
output['wifi_props'] = \
self.wifi_props.to_dict() if self.wifi_props else None
output['lte_props'] = \
self.lte_props.to_dict() if self.lte_props else None
output['wifi_slices'] = \
self.wifi_slices if self.wifi_slices else None
output['lte_slices'] = \
self.lte_slices if self.lte_slices else None
return output
def get_prefix(self):
"""Return tenant prefix."""
tokens = [self.project_id.hex[0:12][i:i + 2] for i in range(0, 12, 2)]
return EtherAddress(':'.join(tokens))
def generate_bssid(self, mac):
""" Generate a new BSSID address. """
base_mac = self.get_prefix()
base = str(base_mac).split(":")[0:3]
unicast_addr_mask = int(base[0], 16) & 0xFE
base[0] = str(format(unicast_addr_mask, 'X'))
suffix = str(mac).split(":")[3:6]
return EtherAddress(":".join(base + suffix))
| |
import re
import sys
import gam
from gam.var import *
from gam import controlflow
from gam import display
from gam import gapi
from gam.gapi import errors as gapi_errors
from gam.gapi.directory import customer as gapi_directory_customer
def _get_customerid():
    """Return the customer ID normalized to the 'C'-prefixed form."""
    gapi_directory_customer.setTrueCustomerId()
    customer_id = GC_Values[GC_CUSTOMER_ID]
    return customer_id if customer_id[0] == 'C' else 'C' + customer_id
def build():
    """Return a Licensing API service object."""
    return gam.buildGAPIObject('licensing')
def getProductAndSKU(sku):
    """Resolve a user-supplied SKU string to a (productId, skuId) pair.

    Matching is case-insensitive and ignores dashes/spaces; the input is
    compared against known SKU ids, their aliases, and display names. When
    nothing matches, the product is guessed from the "<word>-<word>" prefix
    of the string, falling back to the string itself.
    """
    l_sku = sku.lower().replace('-', '').replace(' ', '')
    for a_sku, sku_values in list(SKUS.items()):
        if l_sku == a_sku.lower().replace(
                '-',
                '') or l_sku in sku_values['aliases'] or l_sku == sku_values[
                    'displayName'].lower().replace(' ', ''):
            return (sku_values['product'], a_sku)
    try:
        # Fixed character class: "[A-Z,a-z]" also matched a literal comma;
        # "[A-Za-z]" matches letters only, as intended.
        product = re.search(r'^([A-Za-z]*-[A-Za-z]*)', sku).group(1)
    except AttributeError:
        product = sku
    return (product, sku)
def user_lic_result(request_id, response, exception):
    """Batch-request callback: report a per-user licensing API error, if any."""
    if not exception:
        return
    http_status, reason, message = gapi_errors.get_gapi_error_detail(
        exception,
        soft_errors=True)
    print(f'ERROR: {request_id}: {http_status} - {reason} {message}')
def create(users, sku=None):
    """Assign a license SKU to each user in *users*.

    When *sku* is not given it is read from sys.argv[5]; an optional
    "product <id>" argument pair may follow to override the product id.
    """
    lic = build()
    if not sku:
        sku = sys.argv[5]
    productId, skuId = getProductAndSKU(sku)
    sku_name = _formatSKUIdDisplayName(skuId)
    i = 6
    if len(sys.argv) > 6 and sys.argv[i].lower() in ['product', 'productid']:
        productId = sys.argv[i+1]
        i += 2
    for user in users:
        # Fixed message: the original printed "Adding license ... from to ...".
        print(f'Adding license {sku_name} to user {user}')
        gapi.call(lic.licenseAssignments(),
                  'insert',
                  soft_errors=True,
                  productId=productId,
                  skuId=skuId,
                  body={'userId': user})
def delete(users, sku=None):
    """Remove a license SKU from each user in *users*.

    When *sku* is not given it is read from sys.argv[5]; an optional
    "product <id>" argument pair may follow to override the product id.
    """
    lic = build()
    if not sku:
        sku = sys.argv[5]
    productId, skuId = getProductAndSKU(sku)
    sku_name = _formatSKUIdDisplayName(skuId)
    arg_idx = 6
    if len(sys.argv) > 6 and sys.argv[arg_idx].lower() in ('product', 'productid'):
        productId = sys.argv[arg_idx + 1]
        arg_idx += 2
    for user in users:
        print(f'Removing license {sku_name} from user {user}')
        gapi.call(lic.licenseAssignments(),
                  'delete',
                  soft_errors=True,
                  productId=productId,
                  skuId=skuId,
                  userId=user)
def sync(users):
    """Make license holders for the SKU exactly match *users*.

    Reads the SKU from sys.argv[5], diffs the current holders against
    *users*, removes licenses first (to free seats), then adds the missing
    ones.
    """
    sku = sys.argv[5]
    current_licenses = gam.getUsersToModify(entity_type='license',
                                            entity=sku)
    users_to_license = [user for user in users if user not in current_licenses]
    users_to_unlicense = [user for user in current_licenses if user not in users]
    print(f'Need to remove license from {len(users_to_unlicense)} and add to ' \
          f'{len(users_to_license)} users...')
    # do the remove first to free up seats
    delete(users_to_unlicense, sku)
    create(users_to_license, sku)
def update(users, sku=None, old_sku=None):
    """Switch each user's license from *old_sku* to *sku*.

    Both SKUs may be passed as arguments or read from sys.argv; an optional
    "product <id>" argument pair may override the product id, and the old
    SKU may be prefixed by the word "from" on the command line.
    """
    lic = build()
    if not sku:
        sku = sys.argv[5]
    productId, skuId = getProductAndSKU(sku)
    sku_name = _formatSKUIdDisplayName(skuId)
    i = 6
    if len(sys.argv) > 6 and sys.argv[i].lower() in ['product', 'productid']:
        productId = sys.argv[i+1]
        i += 2
    if not old_sku:
        try:
            old_sku = sys.argv[i]
            if old_sku.lower() == 'from':
                old_sku = sys.argv[i + 1]
        except IndexError:
            # Fixed: sys.argv is a list, so a missing argument raises
            # IndexError; the original caught KeyError, making this
            # friendly error path unreachable.
            controlflow.system_error_exit(
                2,
                'You need to specify the user\'s old SKU as the last argument'
            )
    _, old_sku = getProductAndSKU(old_sku)
    old_sku_name = _formatSKUIdDisplayName(old_sku)
    for user in users:
        print(f'Changing user {user} from license {old_sku_name} to {sku_name}')
        gapi.call(lic.licenseAssignments(),
                  'patch',
                  soft_errors=True,
                  productId=productId,
                  skuId=old_sku,
                  userId=user,
                  body={'skuId': skuId})
def print_(returnFields=None,
           skus=None,
           countsOnly=False,
           returnCounts=False):
    """Print (or return) license assignments, optionally as counts.

    When *returnFields* is None, remaining sys.argv arguments are parsed
    (todrive, products, skus, allskus, gsuite, countsonly) and results are
    written as CSV. When *returnFields* is set, the matching fields are
    returned to the caller instead ('userId' -> list of user ids, anything
    else -> {userId: [skuId, ...]}). With countsOnly+returnCounts, a list of
    per-product/SKU count rows is returned.
    """
    lic = build()
    customer_id = _get_customerid()
    products = []
    licenses = []
    licenseCounts = []
    if not returnFields:
        # CLI mode: parse the remaining command-line arguments.
        csvRows = []
        todrive = False
        i = 3
        while i < len(sys.argv):
            myarg = sys.argv[i].lower()
            if not returnCounts and myarg == 'todrive':
                todrive = True
                i += 1
            elif myarg in ['products', 'product']:
                products = sys.argv[i + 1].split(',')
                i += 2
            elif myarg in ['sku', 'skus']:
                skus = sys.argv[i + 1].split(',')
                i += 2
            elif myarg == 'allskus':
                skus = sorted(SKUS)
                products = []
                i += 1
            elif myarg == 'gsuite':
                skus = [
                    skuId for skuId in SKUS
                    if SKUS[skuId]['product'] in ['Google-Apps', '101031']
                ]
                products = []
                i += 1
            elif myarg == 'countsonly':
                countsOnly = True
                i += 1
            else:
                controlflow.invalid_argument_exit(sys.argv[i],
                                                  'gam print licenses')
        if not countsOnly:
            fields = 'nextPageToken,items(productId,skuId,userId)'
            titles = ['userId', 'productId', 'skuId']
        else:
            # Counts only need user ids; titles depend on SKU grouping.
            fields = 'nextPageToken,items(userId)'
            if not returnCounts:
                if skus:
                    titles = ['productId', 'skuId', 'licenses']
                else:
                    titles = ['productId', 'licenses']
    else:
        fields = f'nextPageToken,items({returnFields})'
    if skus:
        # Query each SKU individually.
        for sku in skus:
            if not products:
                product, sku = getProductAndSKU(sku)
            else:
                product = products[0]
            page_message = gapi.got_total_items_msg(
                f'Licenses for {SKUS.get(sku, {"displayName": sku})["displayName"]}',
                '...\n')
            try:
                licenses += gapi.get_all_pages(
                    lic.licenseAssignments(),
                    'listForProductAndSku',
                    'items',
                    throw_reasons=[
                        gapi_errors.ErrorReason.INVALID,
                        gapi_errors.ErrorReason.FORBIDDEN
                    ],
                    page_message=page_message,
                    customerId=customer_id,
                    productId=product,
                    skuId=sku,
                    fields=fields)
                if countsOnly:
                    licenseCounts.append([
                        'Product', product, 'SKU', sku, 'Licenses',
                        len(licenses)
                    ])
                    licenses = []
            except (gapi_errors.GapiInvalidError,
                    gapi_errors.GapiForbiddenError):
                # Skip SKUs the domain cannot query.
                pass
    else:
        # No SKU filter: query whole products.
        if not products:
            products = sorted(PRODUCTID_NAME_MAPPINGS)
        for productId in products:
            page_message = gapi.got_total_items_msg(
                f'Licenses for {PRODUCTID_NAME_MAPPINGS.get(productId, productId)}',
                '...\n')
            try:
                licenses += gapi.get_all_pages(
                    lic.licenseAssignments(),
                    'listForProduct',
                    'items',
                    throw_reasons=[
                        gapi_errors.ErrorReason.INVALID,
                        gapi_errors.ErrorReason.FORBIDDEN
                    ],
                    page_message=page_message,
                    customerId=customer_id,
                    productId=productId,
                    fields=fields)
                if countsOnly:
                    licenseCounts.append(
                        ['Product', productId, 'Licenses',
                         len(licenses)])
                    licenses = []
            except (gapi_errors.GapiInvalidError,
                    gapi_errors.GapiForbiddenError):
                pass
    if countsOnly:
        if returnCounts:
            return licenseCounts
        if skus:
            for u_license in licenseCounts:
                csvRows.append({
                    'productId': u_license[1],
                    'skuId': u_license[3],
                    'licenses': u_license[5]
                })
        else:
            for u_license in licenseCounts:
                csvRows.append({
                    'productId': u_license[1],
                    'licenses': u_license[3]
                })
        display.write_csv_file(csvRows, titles, 'Licenses', todrive)
        return
    if returnFields:
        if returnFields == 'userId':
            userIds = []
            for u_license in licenses:
                userId = u_license.get('userId', '').lower()
                if userId:
                    userIds.append(userId)
            return userIds
        # Any other returnFields: map user -> list of SKU ids.
        userSkuIds = {}
        for u_license in licenses:
            userId = u_license.get('userId', '').lower()
            skuId = u_license.get('skuId')
            if userId and skuId:
                userSkuIds.setdefault(userId, [])
                userSkuIds[userId].append(skuId)
        return userSkuIds
    for u_license in licenses:
        userId = u_license.get('userId', '').lower()
        skuId = u_license.get('skuId', '')
        csvRows.append({
            'userId': userId,
            'productId': u_license.get('productId', ''),
            'skuId': _skuIdToDisplayName(skuId)
        })
    display.write_csv_file(csvRows, titles, 'Licenses', todrive)
def show():
    """Print per-product/SKU license counts, one comma-separated line each."""
    licenseCounts = print_(countsOnly=True, returnCounts=True)
    for counts in licenseCounts:
        pairs = [f'{counts[idx]}: {counts[idx + 1]}'
                 for idx in range(0, len(counts), 2)]
        print(', '.join(pairs))
def _skuIdToDisplayName(skuId):
    """Return the display name for *skuId*, or the id itself when unknown."""
    if skuId in SKUS:
        return SKUS[skuId]['displayName']
    return skuId
def _formatSKUIdDisplayName(skuId):
    """Return "skuId (display name)", or just the id when no name is known."""
    display = _skuIdToDisplayName(skuId)
    return skuId if display == skuId else f'{skuId} ({display})'
| |
from __future__ import print_function
import gc
import inspect
import math, copy
import os
import sys
import threading
import multiprocessing
import concurrent.futures
import time
import random
import pygreentea
# Determine where PyGreentea is
pygtpath = os.path.normpath(os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0])))
rootpath = os.path.dirname(pygtpath)
# Determine where PyGreentea gets called from
cmdpath = os.getcwd()
sys.path.append(pygtpath)
sys.path.append(pygtpath + '/..')
sys.path.append(cmdpath)
print(os.path.dirname(pygreentea.__file__))
import h5py
import numpy as np
import png
from scipy import io
# Load the configuration file
import config
from numpy import float32, int32, uint8
# Import Caffe
pycaffepath = ''
if (os.path.isabs(config.caffe_path)):
pycaffepath = config.caffe_path + '/python'
else:
pycaffepath = rootpath + '/' + config.caffe_path + '/python'
sys.path.append(pycaffepath)
import caffe as caffe
# Import the network generator
import netgen
from netgen import metalayers
from netgen import fix_input_dims
# Import Malis
if (os.path.isabs(config.malis_path)):
sys.path.append(config.malis_path)
else:
sys.path.append(pygtpath+'/'+config.malis_path)
import malis as malis
def minidx(data, index):
    """Return data[index], clamped to the last element when index overruns."""
    last = len(data) - 1
    return data[index] if index <= last else data[last]
# Wrapper around a networks set_input_arrays to prevent memory leaks of locked up arrays
# Wrapper around a networks set_input_arrays to prevent memory leaks of locked up arrays
class NetInputWrapper:
    """Owns one persistent float32 buffer per network input.

    Repeated calls to set_inputs copy into the same pre-allocated arrays,
    so the network never holds references to short-lived caller arrays.
    """

    def __init__(self, net, input_specs=None, output_specs=None):
        """net: the wrapped network; input/output_specs: {name: spec} dicts.

        Fixed: the defaults were mutable dicts ({}), a shared-mutable-default
        pitfall; None now stands in for an empty mapping.
        """
        self.net = net
        self.input_specs = {} if input_specs is None else input_specs
        self.output_specs = {} if output_specs is None else output_specs
        self.inputs = {}
        for set_key in self.input_specs.keys():
            shape = self.input_specs[set_key].shape
            # Pre-allocate arrays that will persist with the network
            self.inputs[set_key] = np.zeros(tuple(shape), dtype=float32)

    def set_inputs(self, data):
        """Copy *data* into the persistent buffers and hand them to the net."""
        for set_key in self.input_specs.keys():
            np.copyto(self.inputs[set_key], np.ascontiguousarray(data[set_key]).astype(float32))
            self.net.set_layer_input_arrays(self.input_specs[set_key].memory_layer, self.inputs[set_key], None)

    def get_outputs(self):
        """Return {name: blob data} for every configured output."""
        outputs = {}
        for set_key in self.output_specs.keys():
            outputs[set_key] = self.output_specs[set_key].blob.data
        return outputs
# Transfer network weights from one network to another
# Transfer network weights from one network to another
def net_weight_transfer(dst_net, src_net):
    """Copy parameter blobs (weights and biases) from src_net into dst_net.

    Only layers present in both networks are transferred, and only up to the
    shorter of the two parameter lists per layer.
    """
    # Go through all source layers/weights
    for layer_key in src_net.params:
        # Test existence of the weights in destination network
        if (layer_key in dst_net.params):
            # Copy weights + bias
            for i in range(0, min(len(dst_net.params[layer_key]), len(src_net.params[layer_key]))):
                np.copyto(dst_net.params[layer_key][i].data, src_net.params[layer_key][i].data)
def normalize(dataset, newmin=-1, newmax=1):
    """Linearly rescale *dataset* so its global min/max map to [newmin, newmax]."""
    # Reduce axis 0 repeatedly until a scalar (0-d) value remains.
    maxval = dataset
    while len(maxval.shape) > 0:
        maxval = maxval.max(0)
    minval = dataset
    while len(minval.shape) > 0:
        minval = minval.min(0)
    unit = (dataset - minval) / (maxval - minval)
    return unit * (newmax - newmin) + newmin
def get_solver_states(prefix):
    """Return sorted (iteration, filename) pairs for '<prefix>_iter_N.solverstate' files in cwd."""
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    print(files)
    marker = prefix + '_iter_'
    suffix = '.solverstate'
    solverstates = []
    for fname in files:
        if marker in fname and suffix in fname:
            iteration = int(fname[len(marker):-len(suffix)])
            solverstates.append((iteration, fname))
    return sorted(solverstates)
def get_caffe_models(prefix):
    """Return sorted (iteration, filename) pairs for '<prefix>_iter_N.caffemodel' files in cwd."""
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    print(files)
    marker = prefix + '_iter_'
    suffix = '.caffemodel'
    caffemodels = []
    for fname in files:
        if marker in fname and suffix in fname:
            iteration = int(fname[len(marker):-len(suffix)])
            caffemodels.append((iteration, fname))
    return sorted(caffemodels)
def scale_errors(data, factor_low, factor_high):
    """Per-element weights: factor_high where data >= 0.5, factor_low where data < 0.5."""
    high_part = (data >= 0.5) * factor_high
    low_part = (data < 0.5) * factor_low
    return high_part + low_part
def count_affinity(dataset):
    """Return (count of elements >= 0.5, count of elements < 0.5)."""
    high_count = (dataset >= 0.5).sum()
    low_count = (dataset < 0.5).sum()
    return high_count, low_count
def border_reflect(dataset, border):
    """Pad *dataset* with *border* reflected samples on every axis."""
    pad_width = (border, border)
    return np.pad(dataset, pad_width, 'reflect')
def slice_data(data, offsets, sizes):
    """
    Cut a spatial window (all channels) out of *data*.

    data should be of shape [#feature maps (channels), spatial Z, spatial Y, spatial X]
    offsets and sizes should be of shape [spatial Z, spatial Y, spatial X]
    The number of spatial dimensions can vary
    """
    slicing = [slice(0, data.shape[0])] + [slice(offsets[i], offsets[i]+sizes[i]) for i in range(0, min(len(offsets),len(data.shape)-1))]
    # Fixed: index with a tuple -- indexing an ndarray with a *list* of
    # slices is an error on modern NumPy.
    return data[tuple(slicing)]
def set_slice_data(data, insert_data, offsets, sizes):
    """Write *insert_data* into the window of *data* selected by offsets/sizes (all channels)."""
    slicing = [slice(0, data.shape[0])] + [slice(offsets[i], offsets[i]+sizes[i]) for i in range(0, min(len(offsets),len(data.shape)-1))]
    # Fixed: index with a tuple -- indexing an ndarray with a *list* of
    # slices is an error on modern NumPy.
    data[tuple(slicing)] = insert_data
def sanity_check_net_blobs(net):
    """Scan every blob for implausibly large activations (|x| > 1000).

    Prints each offending location plus a per-blob summary and stops at the
    first blob that fails the check.
    """
    for key in net.blobs.keys():
        dst = net.blobs[key]
        data = np.ndarray.flatten(dst.data[0].copy())
        print('Blob: %s; %s' % (key, data.shape))
        failure = False
        first = -1
        for i in range(0,data.shape[0]):
            if abs(data[i]) > 1000:
                failure = True
                if first == -1:
                    first = i
                # NOTE: '%d' truncates the (float) activation to an integer.
                print('Failure, location %d; objective %d' % (i, data[i]))
        print('Failure: %s, first at %d, mean %3.5f' % (failure,first,np.mean(data)))
        if failure:
            break
def dump_feature_maps(net, folder):
    """Write every feature map of every blob as an 8-bit grayscale PNG."""
    for key in net.blobs.keys():
        dst = net.blobs[key]
        # Rescale activations into the displayable 0..255 range.
        norm = normalize(dst.data[0], 0, 255)
        # print(norm.shape)
        for f in range(0,norm.shape[0]):
            # Fixed: use a context manager so the file is closed even if
            # the PNG writer raises.
            with open(folder+'/'+key+'_'+str(f)+'.png', 'wb') as outfile:
                writer = png.Writer(norm.shape[2], norm.shape[1], greyscale=True)
                # print(np.uint8(norm[f,:]).shape)
                writer.write(outfile, np.uint8(norm[f,:]))
def dump_tikzgraph_maps(net, folder):
    """Write a padded 2x2 grid of feature maps per blob as a grayscale PNG."""
    xmaps = 2
    ymaps = 2
    padding = 12
    for key in net.blobs.keys():
        dst = net.blobs[key]
        norm = normalize(dst.data[0], 0, 255)
        width = xmaps*norm.shape[2]+(xmaps-1)*padding
        height = ymaps*norm.shape[2]+(ymaps-1)*padding
        # White canvas; maps are pasted in with `padding` pixels between.
        mapout = np.ones((width,height))*255
        # print(norm.shape)
        for f in range(0,xmaps * ymaps):
            xoff = (norm.shape[2] + padding) * (f % xmaps)
            # Fixed: '/' is float division on Python 3 and float offsets
            # break slicing -- use floor division for the row index.
            yoff = (norm.shape[1] + padding) * (f // xmaps)
            mapout[xoff:xoff+norm.shape[2],yoff:yoff+norm.shape[1]] = norm[min(f,norm.shape[0]-1),:]
        # Fixed: context manager guarantees the file is closed on error.
        with open(folder+'/'+key+'.png', 'wb') as outfile:
            writer = png.Writer(width, height, greyscale=True)
            # print(np.uint8(norm[f,:]).shape)
            writer.write(outfile, np.uint8(mapout))
class TestNetEvaluator(object):
    """Runs the test network over the test datasets and saves HDF5 outputs,
    optionally on a background thread."""

    def __init__(self, test_net, train_net, data_arrays, options, callback,
                 output_blob_names=['prob'],
                 data_offsets={}, scales={}):
        # NOTE(review): mutable default arguments are shared across calls;
        # they appear to be only read here, but confirm before mutating them.
        self.callback = callback
        self.options = options
        self.test_net = test_net
        self.train_net = train_net
        self.datasets = data_arrays
        self.thread = None
        self.output_blob_names = output_blob_names
        self.input_specs = get_net_input_specs( self.test_net, data_offsets, scales)
        self.output_specs = get_net_output_specs( self.test_net, output_blob_names, data_offsets, scales)

    def run_test(self, iteration):
        """Process every dataset through the test net and write one HDF5
        file per dataset, named after the dataset (or its index) and the
        training iteration."""
        caffe.select_device(self.options.test_device, False)
        for dataset_i in range(len(self.datasets)):
            dataset_to_process = self.datasets[dataset_i]
            if 'name' in dataset_to_process:
                h5_file_name = dataset_to_process['name'] + '_iter_' + str(iteration) + '.h5'
            else:
                h5_file_name = 'test_out_' + repr(dataset_i) + '_iter_' + str(iteration) + '.h5'
            # Write to a temp name and rename at the end so readers never
            # observe a partially written file.
            temp_file_name = h5_file_name + '.inprogress'
            # fix from Larissa H
            with h5py.File(temp_file_name, 'w') as h5_file:
                output_array = []
                process(self.test_net, [dataset_to_process],
                        self.output_blob_names,
                        output_array, self.callback)
                for blob_name in self.output_blob_names:
                    prediction_shape = self.output_specs[blob_name].shape
                    print('prediction_shape of blob', blob_name, prediction_shape)
                    out = output_array[0][blob_name]
                    print(out.shape)
                    h5_file.create_dataset(name=blob_name, shape=out.shape, dtype=np.float32, data=out)
            os.rename(temp_file_name, h5_file_name)
            print("Just saved {}".format(h5_file_name))

    def evaluate(self, iteration):
        """Sync weights from the train net, then run the test inline
        (config.use_one_thread) or on a fresh thread."""
        # Test/wait if last test is done
        if self.thread is not None:
            try:
                self.thread.join()
            # NOTE(review): bare except silently swallows any join() error.
            except:
                self.thread = None
        net_weight_transfer(self.test_net, self.train_net)
        if config.use_one_thread:
            self.run_test(iteration)
        else:
            self.thread = threading.Thread(target=self.run_test, args=[iteration])
            self.thread.start()
def init_solver(solver_config, options):
    """Create a GPU solver on the training device.

    Returns (solver, test_net) where test_net is None unless
    options.test_net is configured.
    """
    caffe.set_mode_gpu()
    caffe.select_device(options.train_device, False)
    solver_inst = caffe.get_solver(solver_config)
    if options.test_net is None:
        return solver_inst, None
    else:
        return solver_inst, init_testnet(options.test_net, test_device=options.test_device, level=options.test_level, stages=options.test_stages)
def init_testnet(test_net, trained_model=None, test_device=0, level=0, stages=None):
    """Instantiate test network(s) in TEST phase.

    test_device may be a single device id or a list of ids; a list yields
    one network per device. When trained_model is given, its weights are
    loaded into each network.
    """
    caffe.set_mode_gpu()

    def make_net():
        # Build a TEST-phase net, optionally seeded from a trained model.
        if trained_model is None:
            return caffe.Net(test_net, caffe.TEST, level=level, stages=stages)
        return caffe.Net(test_net, trained_model, caffe.TEST, level=level, stages=stages)

    if isinstance(test_device, list):
        # Initialize one test network per requested device.
        networks = []
        for device in test_device:
            caffe.select_device(device, False)
            networks.append(make_net())
        return networks
    # Single device.
    caffe.select_device(test_device, False)
    return make_net()
class InputSpec(object):
    """Description of one network input blob: its memory layer, blob,
    shape, per-axis spatial offsets, and per-axis scale factors."""

    def __init__(self, name, memory_layer, blob, shape, data_offset=[], scale=[1], phase=0):
        # NOTE(review): mutable default arguments; compute_spatial_offsets
        # appends to spatial_offsets, which could mutate the shared default
        # list -- confirm callers always pass fresh lists.
        self.name = name
        self.memory_layer = memory_layer
        self.blob = blob
        self.shape = shape
        self.spatial_offsets = data_offset
        self.scale = scale
        self.phase = phase # only added to the network

    def compute_spatial_offsets(self, max_shape, reset=False):
        """Compute per-spatial-axis offsets of this input vs. *max_shape*."""
        if (reset):
            self.spatial_offsets = []
        # NOTE(review): this unconditional reset makes the 'reset' parameter
        # a no-op here (compare OutputSpec, which only resets on request).
        self.spatial_offsets = []
        for i in range(2 + len(self.spatial_offsets), len(self.shape)):
            self.spatial_offsets.append((minidx(self.scale, i - 2) * max_shape[i] - self.shape[i]))

    def slice_data(self, batch_size, dataset_indexes, offsets, dataset_combined_sizes, data_arrays):
        """Cut one batch of input windows out of *data_arrays*.

        For axes where the array spans the full combined size, the spatial
        offset is centered (offset/2); otherwise the raw scaled offset is
        used.
        """
        data_slice = np.asarray([slice_data(data_arrays[dataset_indexes[i]][self.name], [((minidx(self.scale, j) * offsets[i][j] + self.spatial_offsets[j]/2) if (data_arrays[dataset_indexes[i]][self.name].shape[j] == minidx(self.scale, j) * dataset_combined_sizes[i][j]) else (minidx(self.scale, j) * offsets[i][j])) for j in range(0, min(len(offsets[i]),len(self.spatial_offsets)))], self.shape[2:]) for i in range(0, batch_size)])
        # print(data_slice.shape)
        return data_slice

    def scaled_shape(self):
        """Shape with spatial dims divided by the per-axis scale.

        NOTE(review): '/' is float division on Python 3; this module appears
        written for Python 2 semantics -- confirm before porting.
        """
        return [self.shape[0], self.shape[1]] + [self.shape[i + 2] / minidx(self.scale, i) for i in range(0, len(self.shape) - 2)]
class OutputSpec(object):
    """Description of one network output blob: blob, shape, per-axis
    spatial offsets, and per-axis scale factors."""

    def __init__(self, name, blob, shape, data_offset=[], scale=[1]):
        # NOTE(review): mutable default arguments; compute_spatial_offsets
        # appends to spatial_offsets -- confirm callers pass fresh lists.
        self.name = name
        self.blob = blob
        self.shape = shape
        self.spatial_offsets = data_offset
        self.scale = scale

    def compute_spatial_offsets(self, max_shape, reset=False):
        """Extend per-spatial-axis offsets of this output vs. *max_shape*
        (starting after any offsets already present; reset clears first)."""
        if (reset):
            self.spatial_offsets = []
        for i in range(2 + len(self.spatial_offsets), len(self.shape)):
            self.spatial_offsets.append((minidx(self.scale, i - 2) * max_shape[i] - self.shape[i]))

    def set_slice_data(self, dataset_index, offsets, data_arrays, data_slice):
        """Write *data_slice* into the output array at the scaled offsets."""
        set_slice_data(data_arrays[dataset_index][self.name], data_slice, [minidx(self.scale, j) * offsets[j] for j in range(0, len(offsets))], self.shape[2:])

    def scaled_shape(self):
        """Shape with spatial dims divided by the per-axis scale.

        NOTE(review): '/' is float division on Python 3 -- see InputSpec.
        """
        return [self.shape[0], self.shape[1]] + [self.shape[i + 2] / minidx(self.scale, i) for i in range(0, len(self.shape) - 2)]
def get_net_input_specs(net, data_offsets={}, scales={}):
    """Build an InputSpec for every top blob of each MemoryData layer.

    data_offsets/scales optionally override the per-blob defaults ([] and
    [1] respectively), keyed by blob name.
    """
    input_specs = {}
    for layer in net.layers:
        if (layer.type == 'MemoryData'):
            for i in range(0, layer.layer_param.top_size):
                blob_name = layer.layer_param.get_top(i)
                data_offset = []
                scale = [1]
                if (blob_name in data_offsets.keys()):
                    data_offset = data_offsets[blob_name]
                if (blob_name in scales.keys()):
                    scale = scales[blob_name]
                blob = net.blobs[blob_name]
                input_spec = InputSpec(blob_name, layer, blob, np.shape(blob.data), data_offset, scale)
                input_specs[input_spec.name] = input_spec
    return input_specs
def get_net_output_specs(net, blob_names, data_offsets={}, scales={}):
    """Build an OutputSpec for each named blob of *net*.

    data_offsets/scales optionally override the per-blob defaults ([] and
    [1] respectively), keyed by blob name.
    """
    output_specs = {}
    for blob_name in blob_names:
        offsets_for_blob = data_offsets.get(blob_name, [])
        scale_for_blob = scales.get(blob_name, [1])
        blob = net.blobs[blob_name]
        spec = OutputSpec(blob_name, blob, np.shape(blob.data), offsets_for_blob, scale_for_blob)
        output_specs[spec.name] = spec
    return output_specs
class OffsetGenerator:
    """Produces per-batch dataset indexes and spatial offsets.

    In random mode each batch entry picks a random dataset and a random
    offset (training). In sequential mode it sweeps the datasets with a
    stride of min_shape per axis and allocates output arrays on demand
    (inference); iteration state persists across calls.
    """

    def __init__(self, random, net_input_specs={}, net_output_specs={}):
        # 'random' (bool) shadows the random module only within this method.
        self.random = random
        self.dataset_index = 0
        self.offsets = []
        self.net_input_specs = net_input_specs
        self.net_output_specs = net_output_specs

    def make_dataset_offsets(self, batch_size, data_arrays, output_arrays=[], max_shape=[], min_shape=[]):
        """Return (dataset_indexes, offsets, dataset_combined_sizes) for one batch.

        Returns empty lists once the sequential sweep has exhausted every
        dataset. NOTE(review): output_arrays default is a mutable list that
        IS appended to -- callers must always pass their own list.
        """
        # Pad the shorter of min/max shape with 1s so they align.
        while (len(min_shape) < len(max_shape)):
            min_shape = min_shape + [1]
        while (len(max_shape) < len(min_shape)):
            max_shape = max_shape + [1]
        dataset_indexes = []
        offsets = []
        dataset_combined_sizes = []
        if (self.dataset_index < len(data_arrays)):
            for i in range(0, batch_size):
                dataset_index = random.randint(0, len(data_arrays) - 1)
                data_array_keys = data_arrays[dataset_index].keys()
                # Combined size: per-axis max of all (scaled) arrays in the set.
                dataset_combined_size = []
                for set_key in data_array_keys:
                    shape = [data_arrays[dataset_index][set_key].shape[j] / minidx(self.net_input_specs[set_key].scale, j) for j in range(0,len(data_arrays[dataset_index][set_key].shape))]
                    for j in range(0, len(shape)):
                        if len(dataset_combined_size) <= j:
                            dataset_combined_size.append(shape[j])
                        else:
                            dataset_combined_size[j] = max(dataset_combined_size[j], shape[j])
                dataset_combined_sizes.append(dataset_combined_size)
                if (self.random):
                    # Random spatial offset that keeps the window in bounds.
                    offset = [random.randint(0, dataset_combined_size[j - 1] - max_shape[j]) for j in range(2, len(max_shape))]
                    dataset_indexes.append(dataset_index)
                    offsets.append(offset)
                else:
                    # Sequential sweep of the current dataset.
                    while (len(self.offsets) < len(max(min_shape, max_shape))):
                        self.offsets.append(0)
                    dataset_indexes.append(min(self.dataset_index, len(data_arrays) - 1))
                    # Lazily allocate one output array per dataset/blob.
                    for set_key in self.net_output_specs.keys():
                        if (len(output_arrays) <= dataset_indexes[-1]):
                            output_arrays.append({})
                        if not (set_key in output_arrays[dataset_indexes[-1]].keys()):
                            shape = [self.net_output_specs[set_key].shape[1]] + [dataset_combined_sizes[-1][1 + j] - max_shape[2 + j] + self.net_output_specs[set_key].scaled_shape()[2 + j] for j in range(0, len(self.net_output_specs[set_key].shape) - 2)]
                            output_arrays[dataset_indexes[-1]][set_key] = np.zeros(tuple(shape), dtype=float32)
                    offset = copy.deepcopy(self.offsets)
                    offsets.append(offset)
                    # Advance the sweep position, odometer-style, innermost
                    # spatial axis first; clamp the last window to the edge.
                    increased = False
                    for j in range(0, len(min_shape) - 2):
                        q = len(min_shape) - 3 - j
                        while (len(self.offsets) <= q):
                            self.offsets.append(0)
                        if (self.offsets[q] + max_shape[2 + q] < dataset_combined_sizes[-1][1 + q]):
                            self.offsets[q] = self.offsets[q] + min_shape[2 + q]
                            if (self.offsets[q] + max_shape[2 + q] >= dataset_combined_sizes[-1][1 + q]):
                                self.offsets[q] = dataset_combined_sizes[-1][1 + q] - max_shape[2 + q]
                            increased = True
                        else:
                            increased = False
                            self.offsets[q] = 0
                        if increased:
                            break
                    # Odometer rolled over: move on to the next dataset.
                    if not increased:
                        self.dataset_index = self.dataset_index + 1
        return dataset_indexes, offsets, dataset_combined_sizes
def train(solver, options, train_data_arrays, data_slice_callback,
          test_net, test_data_arrays, test_data_slice_callback,
          data_offsets={}, scales={}, test_data_offsets={}, test_scales={},
          test_output_blob_names={}, eval=None):
    """Main training loop: slice random windows, feed the net, step the solver.

    Periodically (every options.test_interval iterations, after warm-up)
    evaluates on the test net via a TestNetEvaluator (or a caller-supplied
    *eval* object). NOTE(review): the 'eval' parameter shadows the builtin.
    """
    caffe.select_device(options.train_device, False)
    net = solver.net
    test_eval = None
    if eval:
        test_eval = eval
    elif (options.test_net != None):
        test_eval = TestNetEvaluator( test_net, net, test_data_arrays, options, test_data_slice_callback,
                                      output_blob_names=test_output_blob_names)
    # Get the networks input specifications
    input_specs = get_net_input_specs(net, data_offsets=data_offsets, scales=scales)
    # Per-axis maximum of the scaled shapes over all inputs present in the data.
    max_shape = []
    if (len(train_data_arrays) > 0):
        dataset_for_keys = train_data_arrays[0]
        for set_key in input_specs.keys():
            if (input_specs[set_key].name in dataset_for_keys.keys()):
                shape = input_specs[set_key].scaled_shape()
                for j in range(0, len(shape)):
                    if len(max_shape) <= j:
                        max_shape.append(shape[j])
                    else:
                        max_shape[j] = max(max_shape[j], shape[j])
        for set_key in input_specs.keys():
            if (input_specs[set_key].name in train_data_arrays[0].keys()):
                input_specs[set_key].compute_spatial_offsets(max_shape)
    batch_size = max_shape[0]
    net_io = NetInputWrapper(net, input_specs=input_specs)
    # Random offsets for training.
    offset_generator = OffsetGenerator(True, net_input_specs=net_io.input_specs)
    # Loop from current iteration to last iteration
    for i in range(solver.iter, solver.max_iter):
        start = time.time()
        if (options.test_net != None and i % options.test_interval == 1
                and i > 10 ):
            test_eval.evaluate(i)
            if config.use_one_thread:
                # after testing finishes, switch back to the training device
                caffe.select_device(options.train_device, False)
        dataset_indexes, offsets, dataset_combined_sizes = offset_generator.make_dataset_offsets(batch_size, train_data_arrays, max_shape=max_shape)
        # Slice one batch of input windows per input blob.
        slices = {}
        if (len(train_data_arrays) > 0):
            dataset_for_keys = train_data_arrays[0]
            for set_key in dataset_for_keys.keys():
                data_slice = input_specs[set_key].slice_data(batch_size, dataset_indexes, offsets, dataset_combined_sizes, train_data_arrays)
                slices[set_key] = data_slice
        # Let the caller post-process the slices (e.g. augmentation, labels).
        data_slice_callback(input_specs, batch_size, dataset_indexes, offsets, dataset_combined_sizes, train_data_arrays, slices)
        net_io.set_inputs(slices)
        loss = solver.step(1) # Single step
        # Force garbage collection until nothing remains uncollected.
        while gc.collect():
            pass
        time_of_iteration = time.time() - start
def process_input_data(net_io, slices):
    """Upload *slices* into the network inputs, then run one forward pass."""
    network_wrapper = net_io
    network_wrapper.set_inputs(slices)
    network_wrapper.net.forward()
def process_core_multithreaded(device_locks, net_io, data_slices, dataset_indexes, offsets, output_arrays):
    """Worker body: spin until any device lock is free, bind that GPU, run
    process_core on the matching network, then release the lock.

    Relies on the for-loop variable leaking: after acquisition,
    device_list_id == current_device_id, so the correct lock is released.
    """
    # Each thread sets its GPU
    current_device_id = -1
    while (current_device_id == -1):
        for device_list_id in range(0,len(device_locks)):
            # Non-blocking acquire: claim the first free device.
            if (device_locks[device_list_id].acquire(False)):
                current_device_id = device_list_id
                break
        if current_device_id == -1:
            time.sleep(0.0005)
    if config.debug:
        print("Using device (list ID): ", current_device_id)
    # Note that this is the list ID, not the absolute device ID
    caffe.select_device(current_device_id, True)
    process_core(net_io[current_device_id], data_slices, dataset_indexes, offsets, output_arrays)
    device_locks[device_list_id].release()
def process_core(net_io, data_slices, dataset_indexes, offsets, output_arrays):
    """Run one forward pass and scatter its outputs back into *output_arrays*."""
    process_local_net_io = None
    if isinstance(net_io, list):
        # NOTE(review): multiprocessing.Process.name is a property object on
        # the class, not a usable list index -- this branch looks broken;
        # confirm whether it is ever exercised (callers appear to pass a
        # single wrapper, with per-device selection done by the caller).
        process_local_net_io = net_io[multiprocessing.Process.name]
    else:
        process_local_net_io = net_io
    process_input_data(process_local_net_io, data_slices)
    outputs = process_local_net_io.get_outputs()
    for i in range(0, len(dataset_indexes)):
        index = dataset_indexes[i]
        for set_key in outputs.keys():
            # NOTE(review): uses net_io (possibly the list) instead of
            # process_local_net_io here -- verify for the multi-net case.
            net_io.output_specs[set_key].set_slice_data(index, offsets[i], output_arrays, outputs[set_key][i])
def process(test_nets, input_arrays, output_blob_names, output_arrays, data_slice_callback, data_offsets={}, scales={}, min_shape_override=[], max_shape_override=[]):
    """Sweep every input array through the network(s), filling *output_arrays*.

    test_nets may be a single net or a list (one per device); with several
    nets, batches are dispatched to a thread pool guarded by per-device
    locks. Windows of size max_shape are processed with stride min_shape
    (derived from the output shapes unless overridden).
    """
    thread_pool = None
    device_locks = None
    nets = []
    net_ios = []
    batch_size = 0
    if isinstance(test_nets, list):
        nets.extend(test_nets)
    else:
        nets.append(test_nets)
    for net in nets:
        # Get the networks input specifications
        input_specs = get_net_input_specs(net, data_offsets, scales)
        output_specs = get_net_output_specs(net, output_blob_names, data_offsets, scales)
        # Get the rescaled max and min shapes. The min shape will be the processing stride
        max_shape = []
        min_shape = []
        if (len(input_arrays) > 0):
            dataset_for_keys = input_arrays[0]
            for set_key in input_specs.keys():
                if (input_specs[set_key].name in dataset_for_keys.keys()):
                    shape = input_specs[set_key].scaled_shape()
                    for j in range(0, len(shape)):
                        if len(max_shape) <= j:
                            max_shape.append(shape[j])
                        else:
                            max_shape[j] = max(max_shape[j], shape[j])
            for set_key in input_specs.keys():
                if (input_specs[set_key].name in input_arrays[0].keys()):
                    input_specs[set_key].compute_spatial_offsets(max_shape)
            for set_key in output_specs.keys():
                output_specs[set_key].compute_spatial_offsets(max_shape)
            for set_key in output_specs.keys():
                shape = output_specs[set_key].scaled_shape()
                for j in range(0, len(shape)):
                    if len(min_shape) <= j:
                        min_shape.append(shape[j])
                    else:
                        min_shape[j] = min(min_shape[j], shape[j])
        if len(min_shape_override) > 0:
            min_shape = min_shape_override
        # NOTE(review): duplicate of the previous check -- almost certainly
        # meant to apply max_shape_override (otherwise unused) to max_shape.
        if len(min_shape_override) > 0:
            min_shape = min_shape_override
        batch_size = max_shape[0]
        net_io = NetInputWrapper(net, input_specs=input_specs, output_specs=output_specs)
        net_ios.append(net_io)
    # Launch
    if len(nets) > 1:
        thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=len(nets))
        device_locks = []
        for device_list_id in range(0,len(nets)):
            device_locks += [threading.Lock()]
    # Sequential (non-random) sweep over the input arrays.
    offset_generator = OffsetGenerator(False, net_input_specs=net_io.input_specs, net_output_specs=net_io.output_specs)
    while True:
        dataset_indexes, offsets, dataset_combined_sizes = offset_generator.make_dataset_offsets(batch_size, input_arrays, output_arrays=output_arrays, max_shape=max_shape, min_shape=min_shape)
        # No more offsets to process, terminate:
        if (len(dataset_indexes) == 0):
            break
        data_slices = {}
        if (len(input_arrays) > 0):
            dataset_for_keys = input_arrays[0]
            for set_key in dataset_for_keys.keys():
                data_slice = input_specs[set_key].slice_data(batch_size, dataset_indexes, offsets, dataset_combined_sizes, input_arrays)
                data_slices[set_key] = data_slice
        data_slice_callback(input_specs, batch_size, dataset_indexes, offsets, dataset_combined_sizes, input_arrays, data_slices)
        if len(nets) > 1:
            thread_pool.submit(process_core_multithreaded, device_locks, net_io, data_slices, dataset_indexes, offsets, output_arrays)
        else:
            process_core(net_io, data_slices, dataset_indexes, offsets, output_arrays)
    if not (thread_pool is None):
        # Block until all submitted batches have finished.
        thread_pool.shutdown(True)
| |
import unittest
from .. import normalize_reference, scripture_re, reference_to_string
def f(txt):
    """
    Accept a string containing a scripture reference, normalize it, and
    return the reformatted canonical string.
    """
    match = scripture_re.match(txt)
    normalized = normalize_reference(*match.groups())
    return reference_to_string(*normalized)
class TestBookNames(unittest.TestCase):
    def setUp(self):
        """No fixtures needed; each assertion normalizes its own reference."""
        pass
# Old Testament
def test_genesis(self):
self.assertEqual(f('genesis 1:1'), 'Genesis 1:1')
self.assertEqual(f('gen 1:1'), 'Genesis 1:1')
def test_exodus(self):
self.assertEqual(f('exodus 1:1'), 'Exodus 1:1')
self.assertEqual(f('exod 1:1'), 'Exodus 1:1')
def test_leviticus(self):
self.assertEqual(f('leviticus 1:1'), 'Leviticus 1:1')
self.assertEqual(f('lev 1:1'), 'Leviticus 1:1')
def test_numbers(self):
self.assertEqual(f('numbers 1:1'), 'Numbers 1:1')
self.assertEqual(f('num 1:1'), 'Numbers 1:1')
def test_deuteronomy(self):
self.assertEqual(f('deuteronomy 1:1'), 'Deuteronomy 1:1')
self.assertEqual(f('deut 1:1'), 'Deuteronomy 1:1')
def test_joshua(self):
self.assertEqual(f('joshua 1:1'), 'Joshua 1:1')
self.assertEqual(f('josh 1:1'), 'Joshua 1:1')
def test_judges(self):
self.assertEqual(f('judges 1:1'), 'Judges 1:1')
self.assertEqual(f('judg 1:1'), 'Judges 1:1')
def test_ruth(self):
self.assertEqual(f('ruth 1:1'), 'Ruth 1:1')
def test_i_samuel(self):
self.assertEqual(f('I samuel 1:1'), 'I Samuel 1:1')
self.assertEqual(f('1 samuel 1:1'), 'I Samuel 1:1')
self.assertEqual(f('I sam 1:1'), 'I Samuel 1:1')
self.assertEqual(f('1 sam 1:1'), 'I Samuel 1:1')
self.assertEqual(f('1sam 1:1'), 'I Samuel 1:1')
def test_ii_samuel(self):
self.assertEqual(f('II samuel 1:1'), 'II Samuel 1:1')
self.assertEqual(f('2 samuel 1:1'), 'II Samuel 1:1')
self.assertEqual(f('II sam 1:1'), 'II Samuel 1:1')
self.assertEqual(f('2 sam 1:1'), 'II Samuel 1:1')
self.assertEqual(f('2sam 1:1'), 'II Samuel 1:1')
def test_i_kings(self):
self.assertEqual(f('I kings 1:1'), 'I Kings 1:1')
self.assertEqual(f('1 kings 1:1'), 'I Kings 1:1')
self.assertEqual(f('I kgs 1:1'), 'I Kings 1:1')
self.assertEqual(f('1 kgs 1:1'), 'I Kings 1:1')
self.assertEqual(f('1kgs 1:1'), 'I Kings 1:1')
def test_ii_kings(self):
self.assertEqual(f('II kings 1:1'), 'II Kings 1:1')
self.assertEqual(f('2 kings 1:1'), 'II Kings 1:1')
self.assertEqual(f('II kgs 1:1'), 'II Kings 1:1')
self.assertEqual(f('2 kgs 1:1'), 'II Kings 1:1')
self.assertEqual(f('2kgs 1:1'), 'II Kings 1:1')
def test_i_chronicles(self):
self.assertEqual(f('I chronicles 1:1'), 'I Chronicles 1:1')
self.assertEqual(f('1 chronicles 1:1'), 'I Chronicles 1:1')
self.assertEqual(f('I chr 1:1'), 'I Chronicles 1:1')
self.assertEqual(f('I chro 1:1'), 'I Chronicles 1:1')
self.assertEqual(f('I chron 1:1'), 'I Chronicles 1:1')
self.assertEqual(f('1 chr 1:1'), 'I Chronicles 1:1')
self.assertEqual(f('1 chro 1:1'), 'I Chronicles 1:1')
self.assertEqual(f('1 chron 1:1'), 'I Chronicles 1:1')
def test_ii_chronicles(self):
self.assertEqual(f('II chronicles 1:1'), 'II Chronicles 1:1')
self.assertEqual(f('2 chronicles 1:1'), 'II Chronicles 1:1')
self.assertEqual(f('II chr 1:1'), 'II Chronicles 1:1')
self.assertEqual(f('II chro 1:1'), 'II Chronicles 1:1')
self.assertEqual(f('II chron 1:1'), 'II Chronicles 1:1')
self.assertEqual(f('2 chr 1:1'), 'II Chronicles 1:1')
self.assertEqual(f('2 chro 1:1'), 'II Chronicles 1:1')
self.assertEqual(f('2 chron 1:1'), 'II Chronicles 1:1')
def test_ezra(self):
self.assertEqual(f('ezra 1:1'), 'Ezra 1:1')
def test_nehemiah(self):
self.assertEqual(f('nehemiah 1:1'), 'Nehemiah 1:1')
self.assertEqual(f('neh 1:1'), 'Nehemiah 1:1')
def test_esther(self):
self.assertEqual(f('esther 1:1'), 'Esther 1:1')
self.assertEqual(f('esth 1:1'), 'Esther 1:1')
def test_job(self):
self.assertEqual(f('job 1:1'), 'Job 1:1')
def test_psalms(self):
self.assertEqual(f('psalms 1:1'), 'Psalms 1:1')
self.assertEqual(f('ps 1:1'), 'Psalms 1:1')
self.assertEqual(f('psa 1:1'), 'Psalms 1:1')
self.assertEqual(f('psalm 1:1'), 'Psalms 1:1')
def test_proverbs(self):
self.assertEqual(f('proverbs 1:1'), 'Proverbs 1:1')
self.assertEqual(f('prov 1:1'), 'Proverbs 1:1')
def test_ecclesiastes(self):
self.assertEqual(f('ecclesiastes 1:1'), 'Ecclesiastes 1:1')
self.assertEqual(f('ecc 1:1'), 'Ecclesiastes 1:1')
self.assertEqual(f('eccl 1:1'), 'Ecclesiastes 1:1')
self.assertEqual(f('eccles 1:1'), 'Ecclesiastes 1:1')
def test_song_of_solomon(self):
self.assertEqual(f('song of solomon 1:1'), 'Song of Solomon 1:1')
self.assertEqual(f('song 1:1'), 'Song of Solomon 1:1')
self.assertEqual(f('song of sol 1:1'), 'Song of Solomon 1:1')
def test_isaiah(self):
self.assertEqual(f('isaiah 1:1'), 'Isaiah 1:1')
self.assertEqual(f('isa 1:1'), 'Isaiah 1:1')
def test_jeremiah(self):
self.assertEqual(f('jeremiah 1:1'), 'Jeremiah 1:1')
self.assertEqual(f('jer 1:1'), 'Jeremiah 1:1')
def test_lamentations(self):
self.assertEqual(f('lamentations 1:1'), 'Lamentations 1:1')
self.assertEqual(f('lam 1:1'), 'Lamentations 1:1')
def test_ezekiel(self):
self.assertEqual(f('ezekiel 1:1'), 'Ezekiel 1:1')
self.assertEqual(f('ezek 1:1'), 'Ezekiel 1:1')
def test_daniel(self):
self.assertEqual(f('daniel 1:1'), 'Daniel 1:1')
self.assertEqual(f('dan 1:1'), 'Daniel 1:1')
def test_hosea(self):
self.assertEqual(f('hosea 1:1'), 'Hosea 1:1')
self.assertEqual(f('hos 1:1'), 'Hosea 1:1')
def test_joel(self):
self.assertEqual(f('joel 1:1'), 'Joel 1:1')
def test_amos(self):
self.assertEqual(f('amos 1:1'), 'Amos 1:1')
def test_obadiah(self):
self.assertEqual(f('obadiah 1:1'), 'Obadiah 1')
self.assertEqual(f('obad 1:1'), 'Obadiah 1')
def test_jonah(self):
self.assertEqual(f('jonah 1:1'), 'Jonah 1:1')
self.assertEqual(f('jon 1:1'), 'Jonah 1:1')
def test_micah(self):
self.assertEqual(f('micah 1:1'), 'Micah 1:1')
self.assertEqual(f('mic 1:1'), 'Micah 1:1')
def test_nahum(self):
self.assertEqual(f('nahum 1:1'), 'Nahum 1:1')
self.assertEqual(f('nah 1:1'), 'Nahum 1:1')
def test_habakkuk(self):
self.assertEqual(f('habakkuk 1:1'), 'Habakkuk 1:1')
self.assertEqual(f('hab 1:1'), 'Habakkuk 1:1')
def test_zephaniah(self):
self.assertEqual(f('zephaniah 1:1'), 'Zephaniah 1:1')
self.assertEqual(f('zeph 1:1'), 'Zephaniah 1:1')
def test_haggai(self):
self.assertEqual(f('haggai 1:1'), 'Haggai 1:1')
self.assertEqual(f('hag 1:1'), 'Haggai 1:1')
def test_zechariah(self):
self.assertEqual(f('zechariah 1:1'), 'Zechariah 1:1')
self.assertEqual(f('zech 1:1'), 'Zechariah 1:1')
def test_malachi(self):
self.assertEqual(f('malachi 1:1'), 'Malachi 1:1')
self.assertEqual(f('mal 1:1'), 'Malachi 1:1')
# /Old Testament
# New Testament
def test_matthew(self):
self.assertEqual(f('matthew 1:1'), 'Matthew 1:1')
self.assertEqual(f('matt 1:1'), 'Matthew 1:1')
def test_mark(self):
self.assertEqual(f('mark 1:1'), 'Mark 1:1')
def test_luke(self):
self.assertEqual(f('luke 1:1'), 'Luke 1:1')
def test_john(self):
self.assertEqual(f('john 1:1'), 'John 1:1')
def test_acts(self):
self.assertEqual(f('acts 1:1'), 'Acts 1:1')
def test_romans(self):
self.assertEqual(f('romans 1:1'), 'Romans 1:1')
self.assertEqual(f('rom 1:1'), 'Romans 1:1')
def test_i_corinthians(self):
self.assertEqual(f('I corinthians 1:1'), 'I Corinthians 1:1')
self.assertEqual(f('I cor 1:1'), 'I Corinthians 1:1')
self.assertEqual(f('1 corinthians 1:1'), 'I Corinthians 1:1')
self.assertEqual(f('1 cor 1:1'), 'I Corinthians 1:1')
self.assertEqual(f('Icorinthians 1:1'), 'I Corinthians 1:1')
self.assertEqual(f('Icor 1:1'), 'I Corinthians 1:1')
self.assertEqual(f('1corinthians 1:1'), 'I Corinthians 1:1')
self.assertEqual(f('1cor 1:1'), 'I Corinthians 1:1')
def test_ii_corinthians(self):
self.assertEqual(f('II corinthians 1:1'), 'II Corinthians 1:1')
self.assertEqual(f('II cor 1:1'), 'II Corinthians 1:1')
self.assertEqual(f('2 corinthians 1:1'), 'II Corinthians 1:1')
self.assertEqual(f('2 cor 1:1'), 'II Corinthians 1:1')
self.assertEqual(f('IIcorinthians 1:1'), 'II Corinthians 1:1')
self.assertEqual(f('IIcor 1:1'), 'II Corinthians 1:1')
self.assertEqual(f('2corinthians 1:1'), 'II Corinthians 1:1')
self.assertEqual(f('2cor 1:1'), 'II Corinthians 1:1')
def test_galatians(self):
self.assertEqual(f('galatians 1:1'), 'Galatians 1:1')
self.assertEqual(f('gal 1:1'), 'Galatians 1:1')
def test_ephesians(self):
self.assertEqual(f('ephesians 1:1'), 'Ephesians 1:1')
self.assertEqual(f('eph 1:1'), 'Ephesians 1:1')
def test_philippians(self):
self.assertEqual(f('philippians 1:1'), 'Philippians 1:1')
self.assertEqual(f('phil 1:1'), 'Philippians 1:1')
def test_colossians(self):
self.assertEqual(f('colossians 1:1'), 'Colossians 1:1')
self.assertEqual(f('col 1:1'), 'Colossians 1:1')
def test_i_thessalonians(self):
self.assertEqual(f('I thessalonians 1:1'), 'I Thessalonians 1:1')
self.assertEqual(f('I thess 1:1'), 'I Thessalonians 1:1')
self.assertEqual(f('1 thessalonians 1:1'), 'I Thessalonians 1:1')
self.assertEqual(f('1 thess 1:1'), 'I Thessalonians 1:1')
self.assertEqual(f('Ithessalonians 1:1'), 'I Thessalonians 1:1')
self.assertEqual(f('Ithess 1:1'), 'I Thessalonians 1:1')
self.assertEqual(f('1thessalonians 1:1'), 'I Thessalonians 1:1')
self.assertEqual(f('1thess 1:1'), 'I Thessalonians 1:1')
def test_ii_thessalonians(self):
self.assertEqual(f('II thessalonians 1:1'), 'II Thessalonians 1:1')
self.assertEqual(f('II thess 1:1'), 'II Thessalonians 1:1')
self.assertEqual(f('2 thessalonians 1:1'), 'II Thessalonians 1:1')
self.assertEqual(f('2 thess 1:1'), 'II Thessalonians 1:1')
self.assertEqual(f('IIthessalonians 1:1'), 'II Thessalonians 1:1')
self.assertEqual(f('IIthess 1:1'), 'II Thessalonians 1:1')
self.assertEqual(f('2thessalonians 1:1'), 'II Thessalonians 1:1')
self.assertEqual(f('2thess 1:1'), 'II Thessalonians 1:1')
def test_i_timothy(self):
self.assertEqual(f('I timothy 1:1'), 'I Timothy 1:1')
self.assertEqual(f('I tim 1:1'), 'I Timothy 1:1')
self.assertEqual(f('1 timothy 1:1'), 'I Timothy 1:1')
self.assertEqual(f('1 tim 1:1'), 'I Timothy 1:1')
self.assertEqual(f('Itimothy 1:1'), 'I Timothy 1:1')
self.assertEqual(f('Itim 1:1'), 'I Timothy 1:1')
self.assertEqual(f('1timothy 1:1'), 'I Timothy 1:1')
self.assertEqual(f('1tim 1:1'), 'I Timothy 1:1')
def test_ii_timothy(self):
self.assertEqual(f('II timothy 1:1'), 'II Timothy 1:1')
self.assertEqual(f('II tim 1:1'), 'II Timothy 1:1')
self.assertEqual(f('2 timothy 1:1'), 'II Timothy 1:1')
self.assertEqual(f('2 tim 1:1'), 'II Timothy 1:1')
self.assertEqual(f('IItimothy 1:1'), 'II Timothy 1:1')
self.assertEqual(f('IItim 1:1'), 'II Timothy 1:1')
self.assertEqual(f('2timothy 1:1'), 'II Timothy 1:1')
self.assertEqual(f('2tim 1:1'), 'II Timothy 1:1')
def test_titus(self):
self.assertEqual(f('titus 1:1'), 'Titus 1:1')
self.assertEqual(f('tit 1:1'), 'Titus 1:1')
def test_philemon(self):
self.assertEqual(f('philemon 1:1'), 'Philemon 1')
self.assertEqual(f('phile 1:1'), 'Philemon 1')
self.assertEqual(f('philem 1:1'), 'Philemon 1')
self.assertEqual(f('phlm 1:1'), 'Philemon 1')
def test_hebrews(self):
self.assertEqual(f('hebrews 1:1'), 'Hebrews 1:1')
self.assertEqual(f('heb 1:1'), 'Hebrews 1:1')
def test_james(self):
self.assertEqual(f('james 1:1'), 'James 1:1')
self.assertEqual(f('jas 1:1'), 'James 1:1')
def test_i_peter(self):
self.assertEqual(f('I peter 1:1'), 'I Peter 1:1')
self.assertEqual(f('I pet 1:1'), 'I Peter 1:1')
self.assertEqual(f('1 peter 1:1'), 'I Peter 1:1')
self.assertEqual(f('1 pet 1:1'), 'I Peter 1:1')
self.assertEqual(f('Ipeter 1:1'), 'I Peter 1:1')
self.assertEqual(f('Ipet 1:1'), 'I Peter 1:1')
self.assertEqual(f('1peter 1:1'), 'I Peter 1:1')
self.assertEqual(f('1pet 1:1'), 'I Peter 1:1')
def test_i_peter(self):
self.assertEqual(f('II peter 1:1'), 'II Peter 1:1')
self.assertEqual(f('II pet 1:1'), 'II Peter 1:1')
self.assertEqual(f('2 peter 1:1'), 'II Peter 1:1')
self.assertEqual(f('2 pet 1:1'), 'II Peter 1:1')
self.assertEqual(f('IIpeter 1:1'), 'II Peter 1:1')
self.assertEqual(f('IIpet 1:1'), 'II Peter 1:1')
self.assertEqual(f('2peter 1:1'), 'II Peter 1:1')
self.assertEqual(f('2pet 1:1'), 'II Peter 1:1')
def test_i_john(self):
self.assertEqual(f('I john 1:1'), 'I John 1:1')
self.assertEqual(f('1 john 1:1'), 'I John 1:1')
self.assertEqual(f('Ijohn 1:1'), 'I John 1:1')
self.assertEqual(f('1john 1:1'), 'I John 1:1')
def test_ii_John(self):
self.assertEqual(f('II john 1:1'), 'II John 1')
self.assertEqual(f('2 john 1:1'), 'II John 1')
self.assertEqual(f('IIjohn 1:1'), 'II John 1')
self.assertEqual(f('2john 1:1'), 'II John 1')
def test_iii_john(self):
self.assertEqual(f('III john 1:1'), 'III John 1')
self.assertEqual(f('3 john 1:1'), 'III John 1')
self.assertEqual(f('IIIjohn 1:1'), 'III John 1')
self.assertEqual(f('3john 1:1'), 'III John 1')
def test_jude(self):
self.assertEqual(f('jude 1:1'), 'Jude 1')
def test_revelation(self):
self.assertEqual(f('revelation 1:1'), 'Revelation of Jesus Christ 1:1')
self.assertEqual(f('revelation of jesus christ 1:1'), 'Revelation of Jesus Christ 1:1')
self.assertEqual(f('rev 1:1'), 'Revelation of Jesus Christ 1:1')
# /New Testament
| |
"""Implement a simple shell for running on MicroPython.
MicroPython shell, by Dave Hylands, see: https://github.com/dhylands/upy-shell
This is a very simple command line based shell for MicroPython. It is based on a
stripped down version of cmd, which can be found here:
https://github.com/micropython/micropython-lib/tree/master/cmd
I use it by copying cmd.py and shell.py to my sdcard. Then you can do:
import shell
This will automatically run it. If you want to reinvoke it, then use:
shell.run()
The shell has a notion of current directory, and you can use the cd command to move around.
Use help to find out available commands.
"""
# from __future__ import print_function
import os
import sys
import cmd
import pyb
import time
# TODO:
# - Need to figure out how to get input without echo for term_size
# - Add sys.stdin.isatty() for when we support reading from a file
# - Need to integrate readline in a python callable way (into cmd.py)
# so that the up-arrow works.
# - Need to define input command to use this under windows
# Month abbreviations indexed 1-12; index 0 is a placeholder so that the
# 1-based month from time.localtime() can index directly.
MONTH = ('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
         'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec')
# Weekday abbreviations indexed 0-6, Monday first (matches time.localtime()).
WEEKDAY = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
def term_size():
    """Query the attached terminal for its size with ANSI escape codes and
    return it as a (height, width) tuple of ints.

    The cursor is saved, moved to the far bottom-right corner, the terminal
    is asked to report the resulting cursor position (which is then the
    window size), and the cursor is restored.
    """
    # ESC 7 - Save cursor position
    # ESC 8 - Restore cursor position
    # ESC [r - Enable scrolling for entire display
    # ESC [row;colH - Move to cursor position
    # ESC [6n - Device Status Report - send ESC [row;colR
    repl= None
    # Prefer the currently selected REPL channel when the firmware exposes
    # pyb.repl_source(); otherwise fall back to the USB virtual COM port.
    if 'repl_source' in dir(pyb):
        repl = pyb.repl_source()
    if repl is None:
        repl = pyb.USB_VCP()
    repl.send(b'\x1b7\x1b[r\x1b[999;999H\x1b[6n')
    pos = b''
    # Accumulate the reply (ESC [ rows ; cols R), keeping only the digits
    # and the ';' separator; 'R' terminates the report.
    while True:
        char = repl.recv(1)
        if char == b'R':
            break
        if char != b'\x1b' and char != b'[':
            pos += char
    repl.send(b'\x1b8')
    (height, width) = [int(i, 10) for i in pos.split(b';')]
    return height, width
# def term_size():
#     return (25, 80)
def get_mode(filename):
    """Return the st_mode field for *filename*, or 0 if it cannot be stat'ed."""
    try:
        stat_result = os.stat(filename)
    except OSError:
        return 0
    return stat_result[0]
def get_stat(filename):
    """Return os.stat() for *filename*, or a tuple of ten zeros on failure."""
    try:
        result = os.stat(filename)
    except OSError:
        result = (0,) * 10
    return result
def mode_exists(mode):
    """True if *mode* has either the directory or regular-file bit set."""
    return bool(mode & 0xc000)
def mode_isdir(mode):
    """True if *mode* has the directory bit (0x4000) set."""
    return bool(mode & 0x4000)
def mode_isfile(mode):
    """True if *mode* has the regular-file bit (0x8000) set."""
    return bool(mode & 0x8000)
def print_cols(words, termwidth=79, file=None):
    """Takes a single column of words, and prints it as multiple columns that
    will fit in termwidth columns.

    Bug fix: an empty word list now prints nothing; previously
    ``max()`` raised ValueError on an empty sequence.
    """
    if not words:
        return
    width = max(len(word) for word in words)
    nwords = len(words)
    ncols = max(1, (termwidth + 1) // (width + 1))
    nrows = (nwords + ncols - 1) // ncols
    for row in range(nrows):
        for i in range(row, nwords, nrows):
            # The last entry printed on each row gets a newline; all
            # earlier entries are padded to the column width.
            print('%-*s' % (width, words[i]),
                  end='\n' if i + nrows >= nwords else ' ',
                  file=file)
def print_long(filenames, file=None):
    """Prints detailed information about each file passed in."""
    for name in filenames:
        stat = get_stat(name)
        mode = stat[0]
        suffix = '/' if mode_isdir(mode) else ''
        size = stat[6]
        mtime = stat[8]
        localtime = time.localtime(mtime)
        note = ''
        if mtime == 0 and mode == 0:
            note = ' <<< Weird Filename???'
        print('%6d %s %2d %02d:%02d %s%s%s' % (size, MONTH[localtime[1]],
              localtime[2], localtime[4], localtime[5], name, suffix, note),
              file=file)
def sdcard_present():
    """Determine if the sdcard is present. This current solution is specific
    to the pyboard. We should really have a pyb.scard.detected() method
    or something.
    """
    # The SD card-detect pin reads low when a card is inserted.
    return pyb.Pin.board.SD.value() == 0
def split_line(line):
    """Splits a line up into individual arguments in a fashion similar to
    string.split(), but allowing for embedded spaces by using quotes.

    Backslash escapes \\b, \\n, \\r and \\t are translated; a backslash
    before a quote, backslash or space yields that character literally.
    Any other backslash sequence is kept verbatim (backslash included).

    Bug fix: removed an unreachable duplicate ``elif ch == 'r'`` branch
    (the 'r' escape was already handled earlier in the chain).
    """
    arg = None
    args = []
    quote_char = None
    escape = False
    for ch in line:
        if escape:
            if ch == 'b':
                ch = '\b'
            elif ch == 'n':
                ch = '\n'
            elif ch == 'r':
                ch = '\r'
            elif ch == 't':
                ch = '\t'
            elif ch == '"' or ch == "'" or ch == '\\' or ch == ' ':
                pass
            else:
                # Unknown escape: keep the backslash so nothing is lost.
                ch = '\\' + ch
            escape = False
        else:
            if ch == '\\':
                escape = True
                continue
            if ch == quote_char:
                quote_char = None
                continue
            if quote_char is None:
                if (ch == "'" or ch == '"'):
                    quote_char = ch
                    if arg is None:
                        # This allows empty quotes to create an empty argument
                        arg = ''
                    continue
                if ch.isspace():
                    # Unquoted whitespace terminates the current argument.
                    if arg is not None:
                        args.append(arg)
                        arg = None
                    continue
        if arg is None:
            arg = ''
        arg += ch
    if arg is not None:
        args.append(arg)
    return args
def ctime(t):
    """Formats the date/time in a format similar to the date command line
    program under linux, which is: Tue Oct 6 16:26:49 PDT 2015. We don't
    know the timezone, so we drop that portion.
    Like ctime, this function also returns a trailing newline.
    """
    # NOTE: this unpacks exactly 8 fields, matching MicroPython's
    # time.localtime(); CPython returns a 9-field struct_time, which would
    # fail this unpacking.
    (year, month, day, hours, minutes, seconds, weekday, yearday) = time.localtime(t)
    return '{} {} {:2} {:02}:{:02}:{:02} {:4}\n'.format(
        WEEKDAY[weekday], MONTH[month], day, hours, minutes, seconds, year)
class Shell(cmd.Cmd):
    """Implements the shell as a command line interpreter.

    Commands are implemented as do_xxx methods (with matching help_xxx
    methods), per the cmd.Cmd convention.  Output redirection with '>' and
    '>>' is handled by line_to_args()/postcmd().
    """
    def __init__(self, **kwargs):
        (self.term_height, self.term_width) = term_size()
        cmd.Cmd.__init__(self, **kwargs)
        # Remember the interactive output stream so that redirection set up
        # by line_to_args() can be undone in postcmd().
        self.stdout_to_shell = self.stdout
        self.stderr = self.stdout
        self.cur_dir = os.getcwd()
        self.set_prompt()
    def set_prompt(self):
        """Show the current directory as part of the prompt."""
        self.prompt = self.cur_dir + '> '
    def resolve_path(self, path):
        """Return *path* as an absolute path, resolving '.' and '..'
        components relative to the current directory."""
        if len(path) > 1 and path[-1] == '/':
            # Remove trailing slash from path
            path = path[:-1]
        if path[0] != '/':
            # Relative path
            if self.cur_dir[-1] == '/':
                path = self.cur_dir + path
            else:
                path = self.cur_dir + '/' + path
        comps = path.split('/')
        new_comps = []
        for comp in comps:
            if comp == '.':
                continue
            if comp == '..' and len(new_comps) > 1:
                new_comps.pop()
            else:
                new_comps.append(comp)
        if len(new_comps) == 1:
            return new_comps[0] + '/'
        return '/'.join(new_comps)
    def emptyline(self):
        """We want empty lines to do nothing. By default they would repeat the
        previous command.
        """
        pass
    def postcmd(self, stop, line):
        """Close any redirection file and restore output to the shell.

        Bug fix: self.stdout was previously closed unconditionally, which
        closed the interactive stream itself when no redirection was active.
        Now only a redirect file opened by line_to_args() is closed.
        """
        if self.stdout != self.stdout_to_shell:
            self.stdout.close()
            self.stdout = self.stdout_to_shell
        self.set_prompt()
        return stop
    def line_to_args(self, line):
        """This will convert the line passed into the do_xxx functions into
        an array of arguments and handle the Output Redirection Operator.
        """
        args = split_line(line)
        if '>>' in args:
            # Append redirection; the last token is the target filename.
            self.stdout = open(args[-1], 'a')
            return args[:-2]
        if '>' in args:
            self.stdout = open(args[-1], 'w')
            return args[:-2]
        return args
    def help_args(self):
        self.stdout.write('Prints out command line arguments.\n')
    def do_args(self, line):
        """Echo each parsed argument back to the user (debug aid)."""
        args = self.line_to_args(line)
        for idx in range(len(args)):
            self.stdout.write("arg[%d] = '%s'" % (idx, args[idx]))
    def help_cat(self):
        self.stdout.write('Concatinate files and send to stdout.\n')
    def do_cat(self, line):
        """Write the contents of each named file to stdout."""
        args = self.line_to_args(line)
        for filename in args:
            filename = self.resolve_path(filename)
            mode = get_mode(filename)
            if not mode_exists(mode):
                self.stderr.write("Cannot access '%s': No such file\n" %
                                  filename)
                continue
            if not mode_isfile(mode):
                self.stderr.write("'%s': is not a file\n" % filename)
                continue
            with open(filename, 'r') as txtfile:
                for line in txtfile:
                    self.stdout.write(line)
    def help_cd(self):
        self.stdout.write('Changes the current directory\n')
    def do_cd(self, line):
        """Change the current directory (defaults to '/')."""
        args = self.line_to_args(line)
        try:
            dirname = self.resolve_path(args[0])
        except IndexError:
            dirname = '/'
        mode = get_mode(dirname)
        if mode_isdir(mode):
            self.cur_dir = dirname
            if dirname != '/':
                # Make FatFS's notion of current directory agree with our
                # own so that other places in the code that call file system
                # function using relative paths work properly.
                os.chdir(dirname)
        else:
            self.stderr.write("Directory '%s' does not exist\n" % dirname)
    def help_cp(self):
        self.stdout.write('Copies contents of source file to a dest file, overwrites dest if exists\n' +
                          'syntax: cp source dest\n')
    def do_cp(self, line):
        """Copy a source file to a destination file in 256-byte chunks."""
        args = self.line_to_args(line)
        if len(args) > 2:
            self.stderr.write("Input error!\nsyntax: cp source dest\n")
            return 0
        source = self.resolve_path(args[0])
        mode = get_mode(source)
        if not mode_exists(mode):
            self.stderr.write("Cannot access '%s': No such file\n" % source)
            return 0
        if not mode_isfile(mode):
            # Bug fix: this previously referenced the undefined name
            # 'filename', raising NameError instead of reporting the error.
            self.stderr.write("'%s': is not a file\n" % source)
            return 0
        destination = self.resolve_path(args[1])
        with open(destination, "wb") as dest_file:
            try:
                with open(source, 'rb') as src_file:
                    while True:
                        buf = src_file.read(256)
                        if not buf:
                            break
                        dest_file.write(buf)
            except OSError:
                self.stderr.write("OSError for '%s'\n" % destination)
    def help_echo(self):
        self.stdout.write('Display a line of text.\n')
    def do_echo(self, line):
        """Write the arguments back, space separated, followed by a newline."""
        args = self.line_to_args(line)
        self.stdout.write(' '.join(args))
        self.stdout.write('\n')
    def help_help(self):
        self.stdout.write('List available commands with "help" or detailed ' +
                          'help with "help cmd".\n')
    def do_help(self, line):
        cmd.Cmd.do_help(self, line)
    def help_ls(self):
        self.stdout.write('List directory contents.\n' +
                          'Use ls -a to show hidden files\n')
    def do_ls(self, line):
        """List directory contents; supports -a (all) and -l (long) flags."""
        args = self.line_to_args(line)
        show_invisible = False
        show_long = False
        while len(args) > 0 and args[0][0] == '-':
            if args[0] == '-a':
                show_invisible = True
            elif args[0] == '-l':
                show_long = True
            else:
                self.stderr.write("Unrecognized option '%s'" % args[0])
                return
            args.remove(args[0])
        if len(args) == 0:
            args.append('.')
        for idx in range(len(args)):
            dirname = self.resolve_path(args[idx])
            mode = get_mode(dirname)
            if not mode_exists(mode):
                self.stderr.write("Cannot access '%s': No such file or "
                                  "directory\n" % dirname)
                continue
            if not mode_isdir(mode):
                # A plain file argument is just echoed back.
                self.stdout.write(dirname)
                self.stdout.write('\n')
                continue
            files = []
            if len(args) > 1:
                # Multiple directories: print a header before each listing.
                if idx > 0:
                    self.stdout.write('\n')
                self.stdout.write("%s:\n" % dirname)
            for filename in os.listdir(dirname):
                if dirname[-1] == '/':
                    full_filename = dirname + filename
                else:
                    full_filename = dirname + '/' + filename
                mode = get_mode(full_filename)
                if not show_long and mode_isdir(mode):
                    filename += '/'
                if (show_invisible or
                        (filename[0] != '.' and filename[-1] != '~')):
                    files.append(filename)
            if (len(files) > 0):
                if show_long:
                    print_long(sorted(files), file=self.stdout)
                else:
                    print_cols(sorted(files), self.term_width, file=self.stdout)
    def help_micropython(self):
        self.stdout.write('Micropython! Call any scripts! Interactive mode! ' +
                          'Quit with exit()\n')
    def do_micropython(self, line):
        """Run a script file, or enter a minimal interactive Python loop."""
        args = self.line_to_args(line)
        source = None
        if len(args) == 1:
            source = args[-1]
            source = self.resolve_path(source)
            mode = get_mode(source)
            if not mode_exists(mode):
                self.stderr.write("Cannot access '%s': No such file\n" %
                                  source)
                return
            if not mode_isfile(mode):
                self.stderr.write("'%s': is not a file\n" % source)
                return
        if source is None:
            self.stdout.write('[Micropython]\n')
            while True:
                code_str = ''
                line = input('|>>> ')
                if line[0:4] == 'exit':
                    break
                code_str += '%s\n' % line
                if line[-1] == ':':
                    # A block statement: keep reading until a blank line.
                    while True:
                        line = input('|... ')
                        if line == '':
                            break
                        code_str += '%s\n' % line
                exec(code_str)
        else:
            code_str = ''
            with open(source, 'r') as code:
                for line in code:
                    code_str = code_str + line + '\n'
            exec(code_str)
    def help_mkdir(self):
        self.stdout.write('Create directory.\n')
    def do_mkdir(self, line):
        """Create a directory if it does not already exist."""
        args = self.line_to_args(line)
        target = args[0]
        mode = get_mode(target)
        if not mode_exists(mode):
            os.mkdir(target)
        else:
            self.stderr.write('%s already exists.' % target)
    def help_pwd(self):
        self.stdout.write('Prints the current working directory.\n')
    def do_pwd(self, line):
        self.stdout.write('{}\n'.format(self.cur_dir))
    def help_rm(self):
        self.stdout.write('Delete files and directories.\n')
    def do_rm(self, line):
        """Remove a file, or an (empty) directory as a fallback."""
        args = self.line_to_args(line)
        try:
            os.remove(args[0])
        except:
            try:
                os.rmdir(args[0])
            except:
                self.stderr.write('%s is not a file or directory.\n' % args[0])
    def help_soft_reset(self):
        self.stdout.write('Issue Soft Reset.\n')
    def do_soft_reset(self, line):
        raise SystemExit
    def help_get_time(self):
        self.stdout.write('Prints the current time in the format: Www Mmm DD HH:MM:SS YYYY\n')
    def do_get_time(self, line):
        self.stdout.write(ctime(time.time()))
    def help_set_time(self):
        self.stdout.write('Sets the RTC time.\n' +
                          'set_time YYYY MM DD HH MM SS\n')
    def do_set_time(self, line):
        """Set the RTC from six numeric arguments: YYYY MM DD HH MM SS."""
        args = split_line(line)
        if (len(args) != 6):
            self.stderr.write('Expecting 6 arguments in the order: YYYY MM DD HH MM SS\n')
            return
        try:
            (year, month, day, hours, minutes, seconds) = [int(arg) for arg in args]
        except:
            self.stderr.write("Expecting numeric arguments\n")
            return
        # Run the date through mktime and back through localtime so that we
        # get a normalized date and time, and calculate the weekday
        t = time.mktime((year, month, day, hours, minutes, seconds, 0, 0, -1))
        (year, month, day, hours, minutes, seconds, weekday, yearday) = time.localtime(t)
        rtc = pyb.RTC()
        # localtime weekday is 0-6, Monday is 0
        # RTC weekday is 1-7, Monday is 1
        rtc.datetime((year, month, day, weekday + 1, hours, minutes, seconds, 0))
        self.stdout.write(ctime(time.time()))
    def help_EOF(self):
        self.stdout.write('Control-D to quit.\n')
    def do_EOF(self, _):
        # The prompt will have been printed, so print a newline so that the
        # REPL prompt shows up properly.
        self.stdout.write('\n')
        return True
def run():
    """Instantiate the shell and enter its interactive command loop."""
    Shell().cmdloop()
# Importing this module starts the shell immediately; call shell.run()
# to re-enter it afterwards.
run()
| |
#!/router/bin/python
from .stl_general_test import CStlGeneral_Test, CTRexScenario
from functools import wraps
from trex_stl_lib.api import *
import os, sys
import copy
# Return codes used by the latency-verification helper to classify failures.
ERROR_LATENCY_TOO_HIGH = 1
ERROR_CNTR_NOT_0 = 2
class STLRX_Test(CStlGeneral_Test):
"""Tests for RX feature"""
    def setUp(self):
        """Prepare an RX-feature test run.

        Selects per-DPDK-driver traffic tuning, queries the port's RX
        capabilities, derives feature-support flags (VLAN/QinQ/IPv6, max
        packet size, mbuf budgets), and pre-builds the packets and field
        engine (VM) programs used by the individual tests.

        NOTE(review): 'no_vlan'/'no_ipv6' entries mark hardware limitations
        that are presumably lifted when the server runs with --software —
        confirm against the server configuration.
        """
        # Per-driver tuning constants: traffic rates, packet counts, and
        # 9k-packet latency thresholds/limits.
        per_driver_params = {
            'net_vmxnet3': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
                'no_vlan_even_in_software_mode': True,
            },
            'net_ixgbe': {
                'rate_percent': 30,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': True,
                'latency_9k_max_average': 300,
                'latency_9k_max_latency': 400,
                'no_vlan': True,
                'no_ipv6': True,
            },
            'net_ixgbe_vf': {
                'rate_percent': 30,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': False,
                'no_vlan': True,
                'no_ipv6': True,
                'no_vlan_even_in_software_mode': True,
                'max_pkt_size': 2000, # temporary, until we fix this
            },
            'net_i40e': {
                'rate_percent': 80,
                'rate_percent_soft': 10,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': True,
                'latency_9k_max_average': 100,
                'latency_9k_max_latency': 250,
            },
            'net_i40e_vf': {
                'rate_percent': 10,
                'rate_percent_soft': 1,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': False,
                'no_vlan_even_in_software_mode': True,
            },
            'net_e1000_igb': {
                'rate_percent': 80,
                'total_pkts': 500,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'net_e1000_em': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
                'no_vlan_even_in_software_mode': True,
            },
            'net_virtio': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
            },
            'net_mlx5': {
                'rate_percent': 40,
                'rate_percent_soft': 0.01 if self.is_vf_nics else 1,
                'total_pkts': 1000,
                'rate_latency': 0.01 if self.is_vf_nics else 1,
                'latency_9k_enable': False if self.is_vf_nics else True,
                'latency_9k_max_average': 200,
                'latency_9k_max_latency': 200,
            },
            'net_enic': {
                'rate_percent': 1,
                'total_pkts': 50,
                'rate_latency': 1,
                'latency_9k_enable': False,
                'rx_bytes_fix': True,
                'no_vlan_even_in_software_mode': True,
            },
            'net_ntacc': {
                'rate_percent': 10,
                'total_pkts': 1000,
                'rate_latency': 1,
                'latency_9k_enable': True,
                'latency_9k_max_average': 150,
                'latency_9k_max_latency': 350,
                'no_vlan': True,
                'no_ipv6': True,
            },
        }
        CStlGeneral_Test.setUp(self)
        assert 'bi' in CTRexScenario.stl_ports_map
        self.c = CTRexScenario.stl_trex;
        self.tx_port, self.rx_port = CTRexScenario.stl_ports_map['bi'][0]
        port_info = self.c.get_port_info(ports = self.rx_port)[0]
        self.speed = port_info['speed']
        cap = port_info['rx']['caps']
        if "flow_stats" not in cap or "latency" not in cap:
            self.skip('port {0} does not support RX'.format(self.rx_port))
        self.cap = cap
        self.is_VM = True if 'VM' in self.modes else False
        self.max_flow_stats = port_info['rx']['counters']
        if self.max_flow_stats == 1023:
            # hack - to identify if --software flag was used on server
            software_mode = True
        else:
            software_mode = False
        software_mode = False # fix: need good way to identify software_mode
        drv_name = port_info['driver']
        self.drv_name = drv_name
        self.num_cores = self.c.get_server_system_info().get('dp_core_count', 'Unknown')
        mbufs = self.c.get_util_stats()['mbuf_stats']
        # currently in MLX drivers, we use 9k mbufs for RX, so we can't use all of them for TX.
        if self.drv_name == 'net_mlx5':
            self.k9_mbufs = 20
            self.k4_mbufs = 20
        else:
            self.k9_mbufs = 10000
            self.k4_mbufs = 10000
        # Take the minimum available 9k/4k mbuf count across all sockets.
        for key in mbufs:
            if mbufs[key]['9kb'][1] < self.k9_mbufs:
                self.k9_mbufs = mbufs[key]['9kb'][1]
            if mbufs[key]['4096b'][1] < self.k4_mbufs:
                self.k4_mbufs = mbufs[key]['4096b'][1]
        print("")
        print ("num cores {0} num 9k mbufs {1} num 4k mbufs {2}".format(self.num_cores, self.k9_mbufs, self.k4_mbufs))
        if 'no_vlan' in per_driver_params[drv_name] and not software_mode:
            self.vlan_support = False
        else:
            self.vlan_support = True
        if 'no_ipv6' in per_driver_params[drv_name] and not software_mode:
            self.ipv6_support = False
        else:
            self.ipv6_support = True
        if 'max_pkt_size' in per_driver_params[drv_name]:
            self.max_pkt_size = per_driver_params[drv_name]['max_pkt_size']
        else:
            self.max_pkt_size = 9000
        self.rate_percent = per_driver_params[drv_name]['rate_percent']
        self.total_pkts = per_driver_params[drv_name]['total_pkts']
        self.rate_lat = per_driver_params[drv_name].get('rate_latency', self.rate_percent)
        self.rate_fstat = per_driver_params[drv_name].get('rate_percent_soft', self.rate_percent)
        self.latency_9k_enable = per_driver_params[drv_name]['latency_9k_enable']
        self.latency_9k_max_average = per_driver_params[drv_name].get('latency_9k_max_average')
        self.latency_9k_max_latency = per_driver_params[drv_name].get('latency_9k_max_latency')
        if self.is_VM:
            max_drop_allowed = 5
        else:
            max_drop_allowed = 0
        self.allow_drop = per_driver_params[drv_name].get('allow_packets_drop_num', max_drop_allowed)
        self.lat_pps = 1000
        self.drops_expected = False
        self.c.reset(ports = [self.tx_port, self.rx_port])
        if 'rx_bytes_fix' in per_driver_params[drv_name] and per_driver_params[drv_name]['rx_bytes_fix'] == True:
            self.fix_rx_byte_count = True
        else:
            self.fix_rx_byte_count = False
        if software_mode:
            self.qinq_support = True
        else:
            self.qinq_support = False
        # hack for enic
        if 'no_vlan_even_in_software_mode' in per_driver_params[drv_name]:
            self.vlan_support = False
            self.qinq_support = False
        # currently we are not configuring router to vlans
        if not self.is_loopback:
            self.vlan_support = False
            self.qinq_support = False
        # Field-engine program: increment the source IP over a /24 range.
        vm = STLScVmRaw( [ STLVmFlowVar ( "ip_src", min_value="10.0.0.1",
                                          max_value="10.0.0.255", size=4, step=1,op="inc"),
                           STLVmWrFlowVar (fv_name="ip_src", pkt_offset= "IP.src" ), # write ip to packet IP.src
                           STLVmFixIpv4(offset = "IP") # fix checksum
                           ]
                         # Latency is bound to one core. We test that this option is not causing trouble
                         ,cache_size =255 # Cache is ignored by latency flows. Need to test it is not crashing.
                         );
        # Field-engine program: randomize the total packet size (100-1500).
        vm_random_size = STLScVmRaw( [ STLVmFlowVar(name="fv_rand", min_value=100, max_value=1500, size=2, op="random"),
                                       STLVmTrimPktSize("fv_rand"), # total packet size
                                       STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "IP.len", add_val=-14), # fix ip len
                                       STLVmFixIpv4(offset = "IP"), # fix checksum
                                       STLVmWrFlowVar(fv_name="fv_rand", pkt_offset= "UDP.len", add_val=-34) # fix udp len
                                       ]
                                     )
        self.pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.vlan_pkt = STLPktBuilder(pkt = Ether()/Dot1Q()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.qinq_pkt = STLPktBuilder(pkt = Ether(type=0x88A8)/Dot1Q(vlan=19)/Dot1Q(vlan=11)/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.ipv6pkt = STLPktBuilder(pkt = Ether()/IPv6(dst="2001:0:4137:9350:8000:f12a:b9c8:2815",src="2001:4860:0:2001::68")
                                     /UDP(dport=12,sport=1025)/('Your_paylaod_comes_here'))
        self.large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000))
        self.pkt_9k = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*9000))
        self.vm_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")
                                    / UDP(dport=12,sport=1025)/('Your_paylaod_comes_here')
                                    , vm = vm)
        self.vm_large_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1000)
                                          , vm = vm)
        self.vm_rand_size_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*1500)
                                              , vm = vm_random_size)
        # Packet size is 8202, with 2k mbuf size in RX, this makes 4 2K mbufs, plus leftover of 10 bytes in 5th mbuf
        # This test that latency code can handle the situation where latency data is not contiguous in memory
        self.vm_9k_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*8160)
                                       ,vm = vm)
        # skip mlx5 VF
        self.mlx5_defect_dpdk1711_3 = CTRexScenario.setup_name in ['trex23']
        #self.mlx5_defect_dpdk1711_3 =False
        # the setup is like that
        #
        # p0(VF) p1(VF) p2(VF) p3 (VF)
        # PF0 PF1
        # ---------
        # we don't have control on the PF that change the way it count the packets +CRC so we disable the test
        #
        self.i40e_vf_setup_disable = CTRexScenario.setup_name in ['trex22']
        self.errs = []
@classmethod
def tearDownClass(cls):
if CTRexScenario.stl_init_error:
return
# connect back at end of tests
if not cls.is_connected():
CTRexScenario.stl_trex.connect()
# Can use this to run a test multiple times
def run_many_times(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
num_failed = 0
max_tries = 100
num_tries = 1
while num_tries <= max_tries:
try:
func(self, *args, **kwargs)
print ("Try {0} - OK".format(num_tries))
num_tries += 1
continue
except STLError as e:
print ("Try {0} failed ********************".format(num_tries))
num_tries += 1
num_failed += 1
#assert False , '{0}'.format(e)
print("Failed {0} times out of {1} tries".format(num_failed, num_tries-1))
return wrapped
def try_few_times_on_vm(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
# we see random failures with mlx, so do retries on it as well
if self.is_VM:
max_tries = 4
else:
max_tries = 1
num_tries = 1
while num_tries <= max_tries:
try:
func(self, *args, **kwargs)
break
except STLError as e:
num_tries += 1
if num_tries > max_tries:
print ("Try {0} failed. Giving up".format(num_tries))
assert False , '{0}'.format(e)
else:
print ("Try {0} failed. Retrying".format(num_tries))
print("({0}".format(e))
return wrapped
def __verify_latency (self, latency_stats,max_latency,max_average):
error=0;
err_latency = latency_stats['err_cntrs']
latency = latency_stats['latency']
for key in err_latency :
error +=err_latency[key]
if error !=0 :
pprint.pprint(err_latency)
return ERROR_CNTR_NOT_0
tmp = 'Latency results, Average {0} usec, ,Max {1} usec. '.format(int(latency['average']), int(latency['total_max']))
print(tmp)
if latency['average']> max_average:
pprint.pprint(latency_stats)
tmp = 'Average latency is too high {0} {1} '.format(latency['average'], max_average)
print(tmp)
return ERROR_LATENCY_TOO_HIGH
if latency['total_max']> max_latency:
pprint.pprint(latency_stats)
tmp = 'Max latency is too high {0} {1} '.format(latency['total_max'], max_latency)
print(tmp)
return ERROR_LATENCY_TOO_HIGH
return 0
def __exit_with_error(self, stats, xstats, err, pkt_len=0, pkt_type=""):
if pkt_len != 0:
print("Failed with packet: type {0}, len {1}".format(pkt_type, pkt_len))
pprint.pprint(stats)
pprint.pprint(xstats)
raise STLError(err)
    def __verify_flow (self, pg_id, total_pkts, pkt_len, pkt_type, stats, xstats):
        """Verify TX/RX counters (and latency error counters) for one flow.

        :param pg_id: packet group id of the flow to check.
        :param total_pkts: expected packet count; 0 means unknown (continuous
            streams) - the actual TX count is then taken as expected.
        :param pkt_len: expected packet length; 0 disables byte-count checks
            (used for streams with random packet sizes).
        Raises STLError (via __exit_with_error) on any mismatch.
        """
        flow_stats = stats['flow_stats'].get(pg_id)
        # latency section may be absent entirely, or present without this pg_id
        if 'latency' in stats and stats['latency'] is not None:
            latency_stats = stats['latency'].get(pg_id)
        else:
            latency_stats = None
        if not flow_stats:
            assert False, "no flow stats available"
        tx_pkts = flow_stats['tx_pkts'].get(self.tx_port, 0)
        # for continues tests, we do not know how many packets were sent
        if total_pkts == 0:
            total_pkts = tx_pkts
        tx_bytes = flow_stats['tx_bytes'].get(self.tx_port, 0)
        rx_pkts = flow_stats['rx_pkts'].get(self.rx_port, 0)
        if latency_stats is not None:
            drops = latency_stats['err_cntrs']['dropped']
            ooo = latency_stats['err_cntrs']['out_of_order']
            dup = latency_stats['err_cntrs']['dup']
            sth = latency_stats['err_cntrs']['seq_too_high']
            stl = latency_stats['err_cntrs']['seq_too_low']
            lat = latency_stats['latency']
            # out-of-order / duplicate / seq-too-low are never acceptable
            if ooo != 0 or dup != 0 or stl != 0:
                self.__exit_with_error(latency_stats, xstats,
                        'Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
                        , pkt_len, pkt_type)
            # drops/seq-too-high are tolerated up to self.allow_drop, or
            # entirely when the test declared self.drops_expected
            if (drops > self.allow_drop or sth > self.allow_drop) and not self.drops_expected:
                self.__exit_with_error(latency_stats, xstats,
                        'Error packets - dropped:{0}, ooo:{1} dup:{2} seq too high:{3} seq too low:{4}'.format(drops, ooo, dup, sth, stl)
                        , pkt_len, pkt_type)
        if tx_pkts != total_pkts:
            pprint.pprint(flow_stats)
            self.__exit_with_error(flow_stats, xstats
                    , 'TX pkts mismatch - got: {0}, expected: {1}'.format(tx_pkts, total_pkts)
                    , pkt_len, pkt_type)
        # pkt_len == 0, means do not compare pkt length (used for streams with random length)
        if pkt_len != 0 and tx_bytes != (total_pkts * pkt_len):
            self.__exit_with_error(flow_stats, xstats
                    , 'TX bytes mismatch - got: {0}, expected: {1}'.format(tx_bytes, (total_pkts * pkt_len))
                    , pkt_len, pkt_type)
        if abs(total_pkts - rx_pkts) > self.allow_drop and not self.drops_expected:
            self.__exit_with_error(flow_stats, xstats
                    , 'RX pkts mismatch - got: {0}, expected: {1}'.format(rx_pkts, total_pkts)
                    , pkt_len, pkt_type)
        if pkt_len != 0:
            rx_pkt_len = pkt_len
            if self.fix_rx_byte_count:
                # Patch. Vic card always add vlan, so we should expect 4 extra bytes in each packet
                rx_pkt_len += 4
            if "rx_bytes" in self.cap:
                rx_bytes = flow_stats['rx_bytes'].get(self.rx_port, 0)
                # NOTE(review): '/' is true division - assumes Python 3
                # semantics here (float compared against allow_drop); confirm
                # if Python 2 support still matters for this file.
                if abs(rx_bytes / rx_pkt_len - total_pkts ) > self.allow_drop and not self.drops_expected:
                    self.__exit_with_error(flow_stats, xstats
                            , 'RX bytes mismatch - got: {0}, expected: {1}'.format(rx_bytes, (total_pkts * rx_pkt_len))
                            , pkt_len, pkt_type)
    # RX iteration
    def __rx_iteration (self, exp_list, duration=0):
        """Run the configured streams once and verify each expected flow.

        :param exp_list: list of dicts with keys pg_id, total_pkts, pkt_len
            and optionally pkt_type, as consumed by __verify_flow.
        :param duration: 0 runs bursts to completion; otherwise continuous
            streams are run for that many seconds.
        """
        self.c.clear_stats()
        if duration != 0:
            self.c.start(ports = [self.tx_port], duration=duration)
            self.c.wait_on_traffic(ports = [self.tx_port],timeout = duration+10,rx_delay_ms = 100)
        else:
            self.c.start(ports = [self.tx_port])
            self.c.wait_on_traffic(ports = [self.tx_port])
        stats = self.get_stats()
        xstats = self.c.get_xstats(self.rx_port)
        # first pass: aggregate TX/RX totals for a quick drop report
        total_tx = 0
        total_rx = 0
        for exp in exp_list:
            add_tx,add_rx = self.count_tx_pkts(exp['pg_id'], stats)
            total_tx += add_tx
            total_rx += add_rx
        if total_tx != total_rx:
            print("Total TX packets: {0}, total RX: {1} diff: {2}".format(total_tx, total_rx, total_tx-total_rx))
        # second pass: per-flow verification
        for exp in exp_list:
            if 'pkt_type' in exp:
                pkt_type = exp['pkt_type']
            else:
                pkt_type = "not specified"
            self.__verify_flow(exp['pg_id'], exp['total_pkts'], exp['pkt_len'], pkt_type, stats, xstats)
def count_tx_pkts(self, pg_id, stats):
flow_stats = stats['flow_stats'].get(pg_id)
if not flow_stats:
assert False, "no flow stats available"
tx_pkts = flow_stats['tx_pkts'].get(self.tx_port, 0)
rx_pkts = flow_stats['rx_pkts'].get(self.rx_port, 0)
return tx_pkts, rx_pkts
# one stream on TX --> RX
@try_few_times_on_vm
def test_one_stream(self):
if self.drv_name == 'net_i40e_vf':
self.skip('Not running on i40 vf currently due to trex-513 ')
total_pkts = self.total_pkts
self.c.reset()
s1 = STLStream(name = 'rx',
packet = self.pkt,
flow_stats = STLFlowLatencyStats(pg_id = 5),
mode = STLTXSingleBurst(total_pkts = total_pkts,
percentage = self.rate_lat
))
# add both streams to ports
self.c.add_streams([s1], ports = [self.tx_port])
print("\ninjecting {0} packets on port {1}\n".format(total_pkts, self.tx_port))
exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()}
self.__rx_iteration( [exp] )
    @try_few_times_on_vm
    def test_multiple_streams(self):
        """Deterministic multi-stream test (fixed stream counts, bursts)."""
        self._test_multiple_streams(False)
    @try_few_times_on_vm
    def test_multiple_streams_random(self):
        """Randomized multi-stream test (random counts, packet types, sizes)."""
        if self.drv_name == 'net_i40e_vf':
            self.skip('Not running on i40 vf currently')
        self._test_multiple_streams(True)
def _test_multiple_streams(self, is_random):
if self.is_virt_nics:
self.skip('Skip this for virtual NICs')
all_ports = list(CTRexScenario.stl_ports_map['map'].keys());
self.c.reset(ports = all_ports)
if is_random:
num_latency_streams = random.randint(1, 128);
num_flow_stat_streams = random.randint(1, self.max_flow_stats);
all_pkts = [self.pkt]
if self.ipv6_support:
all_pkts.append(self.ipv6pkt)
if self.vlan_support:
all_pkts.append(self.vlan_pkt)
if self.qinq_support:
all_pkts.append(self.qinq_pkt)
else:
num_latency_streams = 128
num_flow_stat_streams = self.max_flow_stats
total_pkts = int(self.total_pkts / (num_latency_streams + num_flow_stat_streams))
if total_pkts == 0:
total_pkts = 1
percent_lat = float(self.rate_lat) / num_latency_streams
percent_fstat = float(self.rate_fstat) / num_flow_stat_streams
print("num_latency_streams:{0}".format(num_latency_streams))
if is_random:
print(" total percent:{0} ({1} per stream)".format(percent_lat * num_latency_streams, percent_lat))
print("num_flow_stat_streams:{0}".format(num_flow_stat_streams))
if is_random:
print(" total percent:{0} ({1} per stream)".format(percent_fstat * num_flow_stat_streams, percent_fstat))
streams = []
exp = []
num_9k = 0
num_4k = 0
for pg_id in range(1, num_latency_streams):
if is_random:
pkt = copy.deepcopy(all_pkts[random.randint(0, len(all_pkts) - 1)])
pkt.set_packet(pkt.pkt / ('a' * random.randint(0, self.max_pkt_size - len(pkt.pkt))))
# don't use more than half of the 9k mbufs. If we are out of 4k, we can use 9k, so limit 4k accordingly
if len(pkt.pkt) >= 4 * 1024:
num_9k += self.num_cores
if num_9k > self.k9_mbufs / 2:
self.max_pkt_size = 4000
if len(pkt.pkt) >= 2 * 1024 and len(pkt.pkt) < 4 * 1024:
num_4k += self.num_cores
if num_4k + num_4k > self.k4_mbufs + self.k9_mbufs / 2:
self.max_pkt_size = 2000
send_mode = STLTXCont(percentage = percent_lat)
else:
pkt = self.pkt
send_mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent_lat)
streams.append(STLStream(name = 'rx {0}'.format(pg_id),
packet = pkt,
flow_stats = STLFlowLatencyStats(pg_id = pg_id),
mode = send_mode))
if is_random:
exp.append({'pg_id': pg_id, 'total_pkts': 0, 'pkt_len': streams[-1].get_pkt_len()
, 'pkt_type': pkt.pkt.sprintf("%Ether.type%")})
else:
exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})
for pg_id in range(num_latency_streams + 1, num_latency_streams + num_flow_stat_streams):
if is_random:
pkt = copy.deepcopy(all_pkts[random.randint(0, len(all_pkts) - 1)])
pkt.set_packet(pkt.pkt / ('a' * random.randint(0, self.max_pkt_size - len(pkt.pkt))))
# don't use more than half of the 9k mbufs. If we are out of 4k, we can use 9k, so limit 4k accordingly
if len(pkt.pkt) >= 4 * 1024:
num_9k += self.num_cores
if num_9k > self.k9_mbufs / 2:
self.max_pkt_size = 4000
if len(pkt.pkt) >= 2000 and len(pkt.pkt) < 4 * 1024:
num_4k += self.num_cores
if num_4k + num_9k > self.k4_mbufs + self.k9_mbufs / 2:
self.max_pkt_size = 2000
send_mode = STLTXCont(percentage = percent_fstat)
else:
pkt = self.pkt
send_mode = STLTXSingleBurst(total_pkts = total_pkts+pg_id, percentage = percent_fstat)
streams.append(STLStream(name = 'rx {0}'.format(pg_id),
packet = pkt,
flow_stats = STLFlowStats(pg_id = pg_id),
mode = send_mode))
if is_random:
exp.append({'pg_id': pg_id, 'total_pkts': 0, 'pkt_len': streams[-1].get_pkt_len()
, 'pkt_type': pkt.pkt.sprintf("%Ether.type%")})
else:
exp.append({'pg_id': pg_id, 'total_pkts': total_pkts+pg_id, 'pkt_len': streams[-1].get_pkt_len()})
# add both streams to ports
self.c.add_streams(streams, ports = [self.tx_port])
if is_random:
duration = 60
print("Duration: {0}".format(duration))
else:
duration = 0
self.__rx_iteration(exp, duration = duration)
    @try_few_times_on_vm
    def test_1_stream_many_iterations (self):
        """Run each stream profile as a single burst, several iterations each.

        Profiles cover latency/flow-stat, field engine on/off, small/large/9k
        and random-size packets, plus vlan/qinq/ipv6 when supported. All
        profiles reuse pg_id 5, so only one stream is active at a time.
        """
        if self.i40e_vf_setup_disable:
            self.skip('i40e_vf_setup_disable')
        total_pkts = self.total_pkts
        streams_data = [
            {'name': 'Latency, with field engine of random packet size', 'pkt': self.vm_rand_size_pkt, 'lat': True},
            {'name': 'Flow stat. No latency', 'pkt': self.pkt, 'lat': False},
            {'name': 'Latency, no field engine', 'pkt': self.pkt, 'lat': True},
            {'name': 'Latency, short packet with field engine', 'pkt': self.vm_pkt, 'lat': True},
            {'name': 'Latency, large packet field engine', 'pkt': self.vm_large_pkt, 'lat': True},
            {'name': 'Latency, 9k packet with field engine', 'pkt': self.vm_9k_pkt, 'lat': True}
        ]
        if self.vlan_support:
            streams_data.append({'name': 'Flow stat with vlan. No latency', 'pkt': self.vlan_pkt, 'lat': False})
        if self.qinq_support:
            streams_data.append({'name': 'Flow stat qinq. No latency', 'pkt': self.qinq_pkt, 'lat': False})
        if self.ipv6_support:
            streams_data.append({'name': 'IPv6 flow stat. No latency', 'pkt': self.ipv6pkt, 'lat': False})
            streams_data.append({'name': 'IPv6 latency, no field engine', 'pkt': self.ipv6pkt, 'lat': True})
        self.c.reset()
        streams = []
        # latency streams are rate-limited by pps, flow-stat ones by percentage
        for data in streams_data:
            if data['lat']:
                flow_stats = STLFlowLatencyStats(pg_id = 5)
                mode = STLTXSingleBurst(total_pkts = total_pkts, pps = self.lat_pps)
            else:
                flow_stats = STLFlowStats(pg_id = 5)
                mode = STLTXSingleBurst(total_pkts = total_pkts, percentage = self.rate_percent)
            s = STLStream(name = data['name'],
                          packet = data['pkt'],
                          flow_stats = flow_stats,
                          mode = mode
                          )
            streams.append(s)
        print("\ninjecting {0} packets on port {1}".format(total_pkts, self.tx_port))
        exp = {'pg_id': 5, 'total_pkts': total_pkts}
        # run each profile in isolation: add it, iterate, then remove it
        for stream in streams:
            self.c.add_streams([stream], ports = [self.tx_port])
            print("Stream: {0}".format(stream.name))
            if 'random packet size' in stream.name:
                # hack for not trying to check match in received byte len when using random size packets
                exp['pkt_len'] = 0
            else:
                exp['pkt_len'] = stream.get_pkt_len()
            if self.is_VM:
                num_repeats = 1
            else:
                num_repeats = 10
            for i in range(1, num_repeats + 1):
                if num_repeats > 1:
                    print("Iteration {0}".format(i))
                try:
                    self.__rx_iteration( [exp] )
                except STLError as e:
                    # always clean up the port before propagating the failure
                    self.c.remove_all_streams(ports = [self.tx_port])
                    raise e
            self.c.remove_all_streams(ports = [self.tx_port])
    def __9k_stream(self, pgid, ports, percent, max_latency, avg_latency, duration, pkt_size):
        """Run a latency stream plus a bulk *pkt_size*-byte stream per port.

        Runs for *duration* seconds on *ports* (all ports when None) and
        validates the latency results on each port.
        :return: the last __verify_latency result (0 on success); asserts on
            non-latency error counters or on STLError.
        """
        my_pg_id=pgid
        s_ports=ports;
        all_ports=list(CTRexScenario.stl_ports_map['map'].keys());
        if ports == None:
            s_ports=all_ports
        assert( type(s_ports)==list)
        stream_pkt = STLPktBuilder(pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)/('a'*pkt_size))
        try:
            # reset all ports
            self.c.reset(ports = all_ports)
            # per port: a fixed-rate latency stream plus a bulk load stream
            for pid in s_ports:
                s1 = STLStream(name = 'rx',
                               packet = self.pkt,
                               flow_stats = STLFlowLatencyStats(pg_id = my_pg_id+pid),
                               mode = STLTXCont(pps = 1000))
                s2 = STLStream(name = 'bulk',
                               packet = stream_pkt,
                               mode = STLTXCont(percentage = percent))
                # add both streams to ports
                self.c.add_streams([s1,s2], ports = [pid])
            self.c.clear_stats()
            self.c.start(ports = s_ports,duration = duration)
            self.c.wait_on_traffic(ports = s_ports,timeout = duration+10,rx_delay_ms = 100)
            stats = self.get_stats()
            for pid in s_ports:
                latency_stats = stats['latency'].get(my_pg_id+pid)
                err = self.__verify_latency(latency_stats, max_latency, avg_latency)
                if err != 0:
                    flow_stats = stats['flow_stats'].get(my_pg_id + pid)
                    xstats = self.c.get_xstats(self.rx_port)
                    pprint.pprint(flow_stats)
                    pprint.pprint(xstats)
                    # too-high latency is returned to the caller for a retry;
                    # any other error counter is a hard failure
                    if err != ERROR_LATENCY_TOO_HIGH:
                        assert False, 'RX pkts error - one of the error counters is not 0'
            return err
        except STLError as e:
            assert False , '{0}'.format(e)
    # Verify that there is low latency with random packet size,duration and ports
    @try_few_times_on_vm
    def test_9k_stream(self):
        """Randomized large-packet latency test.

        Runs 5 scenarios with random duration / pg_id / packet size / port
        subset; each scenario is retried up to 5 times before failing on
        high latency.
        """
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')
        if self.latency_9k_enable == False:
            print("SKIP")
            return
        for i in range(0,5):
            print("Iteration {0}".format(i));
            duration=random.randint(10, 70);
            pgid=random.randint(1, 65000);
            pkt_size=random.randint(1000, 9000);
            all_ports = list(CTRexScenario.stl_ports_map['map'].keys());
            s_port=random.sample(all_ports, random.randint(1, len(all_ports)) )
            s_port=sorted(s_port)
            if ((self.speed == 40) or (self.speed == 100)):
                # the NIC does not support all full rate in case both port works let's filter odd ports
                s_port=list(filter(lambda x: x % 2==0, s_port))
                if len(s_port)==0:
                    s_port=[0];
            # retry loop: pass as soon as one run returns 0
            error=1;
            for j in range(0,5):
                print(" {4} - duration {0} pgid {1} pkt_size {2} s_port {3} ".format(duration,pgid,pkt_size,s_port,j));
                if self.__9k_stream(pgid,
                                    s_port, self.rate_percent,
                                    self.latency_9k_max_latency,
                                    self.latency_9k_max_average,
                                    duration,
                                    pkt_size)==0:
                    error=0;
                    break;
            if error:
                assert False , "Latency too high"
            else:
                print("===>Iteration {0} PASS {1}".format(i,j));
def check_stats(self, a, b, err):
if a != b:
tmp = 'ERROR field : {0}, read : {1} != expected : {2} '.format(err,a,b)
print(tmp)
self.errs.append(tmp)
def send_1_burst(self, client_ports, is_latency, pkts):
self.errs = []
base_pkt = Ether()/IP(src="16.0.0.1",dst="48.0.0.1")/UDP(dport=12,sport=1025)
pad = (60 - len(base_pkt)) * 'x'
stream_pkt = STLPktBuilder(pkt = base_pkt/pad)
# reset all ports
self.c.reset()
for c_port in client_ports:
if is_latency:
s1 = STLStream(name = 'rx',
packet = stream_pkt,
flow_stats = STLFlowLatencyStats(pg_id = 5 + c_port),
mode = STLTXSingleBurst(total_pkts = pkts, pps = 1000))
else:
s1 = STLStream(name = 'rx',
packet = stream_pkt,
mode = STLTXSingleBurst(total_pkts = pkts, pps = 1000))
# add both streams to ports
self.c.add_streams(s1, ports = [c_port])
self.c.clear_stats()
self.c.start(ports = client_ports)
self.c.wait_on_traffic(ports = client_ports)
pgid_stats = self.get_stats()
stats = self.c.get_stats()
bytes = pkts * 64
total_pkts = pkts * len(client_ports)
total_bytes = total_pkts * 64
tps = stats['total']
self.check_stats(tps['ibytes'], total_bytes, "tps[ibytes]")
self.check_stats(tps['obytes'], total_bytes, "tps[obytes]")
self.check_stats(tps['ipackets'], total_pkts, "tps[ipackets]")
self.check_stats(tps['opackets'], total_pkts, "tps[opackets]")
for c_port in client_ports:
s_port = CTRexScenario.stl_ports_map['map'][c_port]
ips = stats[s_port]
ops = stats[c_port]
self.check_stats(ops["obytes"], bytes, "stats[%s][obytes]" % c_port)
self.check_stats(ops["opackets"], pkts, "stats[%s][opackets]" % c_port)
self.check_stats(ips["ibytes"], bytes, "stats[%s][ibytes]" % s_port)
self.check_stats(ips["ipackets"], pkts, "stats[%s][ipackets]" % s_port)
if is_latency:
ls = pgid_stats['flow_stats'][5 + c_port]
self.check_stats(ls['rx_pkts']['total'], pkts, "ls['rx_pkts']['total']")
self.check_stats(ls['rx_pkts'][s_port], pkts, "ls['rx_pkts'][%s]" % s_port)
self.check_stats(ls['tx_pkts']['total'], pkts, "ls['tx_pkts']['total']")
self.check_stats(ls['tx_pkts'][c_port], pkts, "ls['tx_pkts'][%s]" % c_port)
self.check_stats(ls['tx_bytes']['total'], bytes, "ls['tx_bytes']['total']")
self.check_stats(ls['tx_bytes'][c_port], bytes, "ls['tx_bytes'][%s]" % c_port)
if self.errs:
pprint.pprint(stats)
msg = 'Stats do not match the expected:\n' + '\n'.join(self.errs)
raise STLError(msg)
@try_few_times_on_vm
def test_fcs_stream(self):
if self.i40e_vf_setup_disable:
self.skip('Skip for vf_setup')
""" this test send 1 64 byte packet with latency and check that all counters are reported as 64 bytes"""
ports = list(CTRexScenario.stl_ports_map['map'].keys())
for lat in [True, False]:
print("\nSending from ports: {0}, has latency: {1} ".format(ports, lat))
self.send_1_burst(ports, lat, 100)
    # this test adds more and more latency streams and re-test with incremental
    @try_few_times_on_vm
    def test_incremental_latency_streams (self):
        """Add latency streams one at a time and verify after each addition.

        First iteration goes up to 127 streams - drops become expected once
        the aggregate rate exceeds self.rate_lat; a second, small iteration
        (4 streams) then confirms the system still works.
        """
        if self.mlx5_defect_dpdk1711_3:
            self.skip('Skip for mlx5_defect_dpdk1711_3')
        if self.i40e_vf_setup_disable:
            self.skip('Skip for vf_setup')
        if self.is_virt_nics:
            self.skip('Skip this for virtual NICs')
        total_pkts = self.total_pkts
        percent = 0.5
        self.c.reset()
        try:
            # We run till maximum streams allowed. At some point, expecting drops, because rate is too high.
            # then run with less streams again, to see that system is still working.
            for num_iter in [128, 5]:
                exp = []
                for i in range(1, num_iter):
                    # mix small and large packets
                    if i % 2 != 0:
                        my_pkt = self.pkt
                    else:
                        my_pkt = self.large_pkt
                    s1 = STLStream(name = 'rx',
                                   packet = my_pkt,
                                   flow_stats = STLFlowLatencyStats(pg_id = i),
                                   mode = STLTXSingleBurst(total_pkts = total_pkts,
                                                           percentage = percent
                                                           ))
                    # add both streams to ports
                    self.c.add_streams([s1], ports = [self.tx_port])
                    # once the aggregate rate passes rate_lat, drops are fine
                    total_percent = i * percent
                    if total_percent > self.rate_lat:
                        self.drops_expected = True
                    else:
                        self.drops_expected = False
                    print("port {0} : {1} streams at {2}% of line rate\n".format(self.tx_port, i, total_percent))
                    exp.append({'pg_id': i, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()})
                    self.__rx_iteration( exp )
                self.c.remove_all_streams(ports = [self.tx_port])
        except STLError as e:
            assert False , '{0}'.format(e)
    # counters get stuck in i40e when they are getting to limit.
    # this test checks our workaround to this issue
    def test_x710_counters_wraparound(self):
        """Drive i40e HW flow counters past their reset threshold repeatedly.

        Sends 300M packets in one burst so the counter-reset workaround is
        exercised several times, then verifies the totals still match.
        """
        if self.drv_name != 'net_i40e':
            self.skip('Test is only for i40e.')
        percent = min(20, self.speed * 0.8) # 8G at X710 and 20G at XL710
        total_pkts = 300000000 # send 300 million packets to ensure getting to threshold of reset several times
        s1 = STLStream(name = 'wrapping_stream',
                       packet = self.pkt,
                       flow_stats = STLFlowStats(pg_id = 5),
                       mode = STLTXSingleBurst(total_pkts = total_pkts,
                                               percentage = percent))
        # add both streams to ports
        self.c.add_streams([s1], ports = [self.tx_port])
        print("\ninjecting {0} packets on port {1}\n".format(total_pkts, self.tx_port))
        exp = {'pg_id': 5, 'total_pkts': total_pkts, 'pkt_len': s1.get_pkt_len()}
        self.__rx_iteration( [exp] )
def get_stats(self):
old_stats = self.c.get_stats()
new_stats = self.c.get_pgid_stats()
if 'latency' in new_stats:
if old_stats['latency'] != new_stats['latency']:
print ("New and old stats differ in latency")
print(old_stats['latency'])
print(new_stats['latency'])
assert False , "New and old stats differ in latency"
if 'flow_stats' in new_stats:
for pg_id in old_stats['flow_stats']:
for field in ['rx_pkts', 'tx_pkts', 'tx_bytes']:
if pg_id in old_stats['flow_stats'] and pg_id in new_stats['flow_stats']:
if field in old_stats['flow_stats'][pg_id] and field in new_stats['flow_stats'][pg_id]:
if old_stats['flow_stats'][pg_id][field] != new_stats['flow_stats'][pg_id][field]:
print ("New and old stats differ in flow_stats")
print("**********************")
print(old_stats['flow_stats'][pg_id][field])
print("**********************")
print(new_stats['flow_stats'][pg_id][field])
print("**********************")
assert False , "New and old stats differ in flow stat"
return new_stats
| |
# -*- coding: utf-8 -*-
"""Tests for deprecation tools."""
#
# (C) Pywikibot team, 2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
from pywikibot.tools import (
deprecated, deprecate_arg, deprecated_args, add_full_name, remove_last_args
)
from tests.aspects import unittest, DeprecationTestCase
@add_full_name
def noop(foo=None):
    """Dummy decorator factory used to test @add_full_name.

    The produced wrapper raises Exception(obj.__full_name__) so tests can
    assert that add_full_name attached the fully qualified name.
    """
    def decorator(obj):
        def wrapper(*args, **kwargs):
            raise Exception(obj.__full_name__)
            # NOTE(review): unreachable - the raise above always fires;
            # presumably kept to mimic a realistic wrapper body.
            return obj(*args, **kwargs)
        return wrapper
    return decorator
@add_full_name
def noop2():
    """Dummy decorator factory (no-parameter variant) for @add_full_name tests."""
    def decorator(obj):
        def wrapper(*args, **kwargs):
            raise Exception(obj.__full_name__)
            # NOTE(review): unreachable - the raise above always fires.
            return obj(*args, **kwargs)
        return wrapper
    return decorator
# calling this raises Exception carrying its full name, via noop's wrapper
@noop()
def decorated_func():
    """Test dummy decorator."""
    pass
# same as decorated_func, but noop is invoked with a keyword argument
@noop(foo='bar')
def decorated_func2():
    """Test dummy decorator."""
    pass
# same as decorated_func, but noop is invoked with a positional argument
@noop('baz')
def decorated_func3():
    """Test dummy decorator."""
    pass
class DecoratorFullNameTestCase(DeprecationTestCase):
    """Test that add_full_name sets the fully qualified name."""
    net = False
    def test_add_full_name_decorator(self):
        """Each decorated function raises an Exception naming itself."""
        cases = (
            ('decorated_func', decorated_func),
            ('decorated_func2', decorated_func2),
            ('decorated_func3', decorated_func3),
        )
        for expected, func in cases:
            self.assertRaisesRegex(
                Exception,
                __name__ + '.' + expected,
                func)
@deprecated()
def deprecated_func(foo=None):
    """Deprecated function; returns foo unchanged."""
    return foo
# @deprecated used without parentheses (bare-decorator form)
@deprecated
def deprecated_func2(foo=None):
    """Deprecated function."""
    return foo
@deprecated(instead='baz')
def deprecated_func_instead(foo=None):
    """Deprecated function with a replacement ('baz') named in the warning."""
    return foo
@deprecated()
def deprecated_func_bad_args(self):
    """Deprecated function with arg 'self'.

    The 'self' parameter name on a plain function is intentional; it
    exercises the decorator's handling of method-like signatures.
    """
    return self
@deprecate_arg('bah', 'foo')
def deprecated_func_arg(foo=None):
    """Function with deprecated arg 'bah' renamed to 'foo'."""
    return foo
@deprecated_args(bah='foo')
def deprecated_func_arg2(foo=None):
    """Test deprecated_args with one rename ('bah' -> 'foo')."""
    return foo
# 'bah' is renamed to 'foo'; 'silent', 'loud' and 'old' are dropped entirely
# (silent=False marks that drop as a pending deprecation)
@deprecated_args(bah='foo', silent=False, loud=True, old=None)
def deprecated_func_arg3(foo=None):
    """Test deprecated_args with three drops and one rename."""
    return foo
@remove_last_args(['foo', 'bar'])
def deprecated_all():
    """Test remove_last_args with both trailing args removed."""
    return None
@remove_last_args(['bar'])
def deprecated_all2(foo):
    """Test remove_last_args with trailing arg 'bar' removed; returns foo."""
    return foo
class DeprecatedMethodClass(object):
    """Class with methods deprecated."""
    @classmethod
    @deprecated()
    def class_method(cls, foo=None):
        """Deprecated class method; returns foo unchanged."""
        return foo
    @staticmethod
    @deprecated()
    def static_method(foo=None):
        """Deprecated static method; returns foo unchanged."""
        return foo
    @deprecated()
    def instance_method(self, foo=None):
        """Deprecated instance method; stores foo on self and returns it."""
        self.foo = foo
        return foo
    @deprecated
    def instance_method2(self, foo=None):
        """Deprecated instance method (bare decorator, no parentheses)."""
        self.foo = foo
        return foo
    def undecorated_method(self, foo=None):
        """Plain, non-deprecated method for comparison."""
        return foo
    @deprecate_arg('bah', 'foo')
    def deprecated_instance_method_arg(self, foo=None):
        """Method with deprecated arg 'bah' renamed to 'foo'."""
        self.foo = foo
        return foo
    @deprecate_arg('bah', 'foo')
    @deprecate_arg('bah2', 'foo2')
    @deprecate_arg('bah3', 'foo3')
    @deprecate_arg('bah4', 'foo4')
    def deprecated_instance_method_args(self, foo, foo2, foo3=None, foo4=None):
        """Method with many decorators to verify wrapping depth formula."""
        self.foo = foo
        self.foo2 = foo2
        return (foo, foo2)
    @deprecated_args(bah='foo', bah2='foo2')
    def deprecated_instance_method_args_multi(self, foo, foo2):
        """Method renaming two deprecated args via a single decorator."""
        self.foo = foo
        self.foo2 = foo2
        return (foo, foo2)
    @deprecated()
    @deprecate_arg('bah', 'foo')
    def deprecated_instance_method_and_arg(self, foo):
        """Deprecated method that also renames deprecated arg 'bah' to 'foo'."""
        self.foo = foo
        return foo
    @deprecate_arg('bah', 'foo')
    @deprecated()
    def deprecated_instance_method_and_arg2(self, foo):
        """Deprecated method with arg rename, decorator order reversed
        relative to deprecated_instance_method_and_arg."""
        self.foo = foo
        return foo
    @remove_last_args(['foo', 'bar'])
    def deprecated_all(self):
        """Method whose trailing args 'foo' and 'bar' were removed."""
        return None
    @remove_last_args(['bar'])
    def deprecated_all2(self, foo):
        """Method whose trailing arg 'bar' was removed; returns foo."""
        return foo
# NOTE: the docstring below is asserted verbatim by
# DeprecatorTestCase.test_deprecate_class_zero_arg; do not reword it.
@deprecated()
class DeprecatedClassNoInit(object):
    """Deprecated class."""
    pass
@deprecated()
class DeprecatedClass(object):
    """Deprecated class."""
    def __init__(self, foo=None):
        # stored so tests can assert the constructor argument round-trips
        self.foo = foo
class DeprecatorTestCase(DeprecationTestCase):
"""Test cases for deprecation tools."""
net = False
def test_deprecated_function_zero_arg(self):
"""Test @deprecated with functions, with zero arguments."""
rv = deprecated_func()
self.assertEqual(rv, None)
self.assertOneDeprecationParts(__name__ + '.deprecated_func')
def test_deprecated_function(self):
"""Test @deprecated with functions."""
rv = deprecated_func('a')
self.assertEqual(rv, 'a')
self.assertOneDeprecationParts(__name__ + '.deprecated_func')
rv = deprecated_func(1)
self.assertEqual(rv, 1)
self.assertOneDeprecationParts(__name__ + '.deprecated_func')
def test_deprecated_function2(self):
"""Test @deprecated with functions."""
rv = deprecated_func2('a')
self.assertEqual(rv, 'a')
self.assertOneDeprecationParts(__name__ + '.deprecated_func2')
rv = deprecated_func2(1)
self.assertEqual(rv, 1)
self.assertOneDeprecationParts(__name__ + '.deprecated_func2')
def test_deprecated_function_instead(self):
"""Test @deprecated with functions, using instead."""
rv = deprecated_func_instead('a')
self.assertEqual(rv, 'a')
self.assertOneDeprecationParts(__name__ + '.deprecated_func_instead',
'baz')
def test_deprecated_function_bad_args(self):
rv = deprecated_func_bad_args(None)
self.assertEqual(rv, None)
self.assertOneDeprecationParts(__name__ + '.deprecated_func_bad_args')
rv = deprecated_func_bad_args('a')
self.assertEqual(rv, 'a')
self.assertOneDeprecationParts(__name__ + '.deprecated_func_bad_args')
rv = deprecated_func_bad_args(1)
self.assertEqual(rv, 1)
self.assertOneDeprecationParts(__name__ + '.deprecated_func_bad_args')
f = DeprecatedMethodClass()
rv = deprecated_func_bad_args(f)
self.assertEqual(rv, f)
self.assertOneDeprecationParts(__name__ + '.deprecated_func_bad_args')
def test_deprecated_instance_method(self):
f = DeprecatedMethodClass()
rv = f.instance_method()
self.assertEqual(rv, None)
self.assertEqual(f.foo, None)
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.instance_method')
rv = f.instance_method('a')
self.assertEqual(rv, 'a')
self.assertEqual(f.foo, 'a')
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.instance_method')
rv = f.instance_method(1)
self.assertEqual(rv, 1)
self.assertEqual(f.foo, 1)
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.instance_method')
def test_deprecated_instance_method2(self):
f = DeprecatedMethodClass()
rv = f.instance_method2()
self.assertEqual(rv, None)
self.assertEqual(f.foo, None)
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.instance_method2')
def test_deprecated_class_method(self):
"""Test @deprecated with class methods."""
rv = DeprecatedMethodClass.class_method()
self.assertEqual(rv, None)
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.class_method')
rv = DeprecatedMethodClass.class_method('a')
self.assertEqual(rv, 'a')
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.class_method')
rv = DeprecatedMethodClass.class_method(1)
self.assertEqual(rv, 1)
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.class_method')
def test_deprecated_static_method_zero_args(self):
"""Test @deprecated with static methods, with zero arguments."""
rv = DeprecatedMethodClass.static_method()
self.assertEqual(rv, None)
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.static_method')
def test_deprecated_static_method(self):
"""Test @deprecated with static methods."""
rv = DeprecatedMethodClass.static_method('a')
self.assertEqual(rv, 'a')
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.static_method')
rv = DeprecatedMethodClass.static_method(1)
self.assertEqual(rv, 1)
self.assertOneDeprecationParts(
__name__ + '.DeprecatedMethodClass.static_method')
def test_deprecate_class_zero_arg(self):
"""Test @deprecated with classes, without arguments."""
df = DeprecatedClassNoInit()
self.assertEqual(df.__doc__, 'Deprecated class.')
self.assertOneDeprecationParts(__name__ + '.DeprecatedClassNoInit')
df = DeprecatedClass()
self.assertEqual(df.foo, None)
self.assertOneDeprecationParts(__name__ + '.DeprecatedClass')
def test_deprecate_class(self):
"""Test @deprecated with classes."""
df = DeprecatedClass('a')
self.assertEqual(df.foo, 'a')
self.assertOneDeprecationParts(__name__ + '.DeprecatedClass')
    def test_deprecate_function_arg(self):
        """Test @deprecate_arg and @deprecated_args with a single rename."""
        def tests(func):
            # plain calls (no args / new positional) must not warn
            rv = func()
            self.assertEqual(rv, None)
            self.assertNoDeprecation()
            rv = func('a')
            self.assertEqual(rv, 'a')
            self.assertNoDeprecation()
            # the old keyword 'bah' is remapped to 'foo' with one warning
            rv = func(bah='b')
            self.assertEqual(rv, 'b')
            self.assertOneDeprecationParts(
                'bah argument of ' + __name__ + '.' + func.__name__, 'foo')
            self._reset_messages()
            rv = func(foo=1)
            self.assertEqual(rv, 1)
            self.assertNoDeprecation()
            # supplying both the new positional and the old keyword must
            # raise a TypeError (duplicate 'foo')
            self.assertRaisesRegex(
                TypeError,
                r"deprecated_func_arg2?\(\) got multiple values for (keyword )?argument 'foo'",
                func,
                'a', bah='b'
            )
            self._reset_messages()
        tests(deprecated_func_arg)
        tests(deprecated_func_arg2)
    def test_deprecate_and_remove_function_args(self):
        """Test @deprecated_args with dropped args (silent/loud/old) and a rename."""
        rv = deprecated_func_arg3()
        self.assertEqual(rv, None)
        self.assertNoDeprecation()
        rv = deprecated_func_arg3(2)
        self.assertEqual(rv, 2)
        self.assertNoDeprecation()
        # 'silent' is dropped with a PendingDeprecationWarning
        rv = deprecated_func_arg3(foo=1, silent=42)
        self.assertEqual(rv, 1)
        self.assertDeprecationClass(PendingDeprecationWarning)
        self.assertOneDeprecationParts(
            'silent argument of ' + __name__ + '.deprecated_func_arg3')
        # 'loud' and 'old' are dropped with regular deprecation warnings
        rv = deprecated_func_arg3(3, loud='3')
        self.assertEqual(rv, 3)
        self.assertOneDeprecationParts(
            'loud argument of ' + __name__ + '.deprecated_func_arg3')
        rv = deprecated_func_arg3(4, old='4')
        self.assertEqual(rv, 4)
        self.assertOneDeprecationParts(
            'old argument of ' + __name__ + '.deprecated_func_arg3')
    def test_function_remove_last_args(self):
        """Test @remove_last_args on functions."""
        # no removed args supplied: no warning
        rv = deprecated_all()
        self.assertEqual(rv, None)
        self.assertNoDeprecation()
        # removed arg supplied by keyword: value dropped with a warning
        rv = deprecated_all(foo=42)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of %s.deprecated_all are "
            "deprecated. The value(s) provided for 'foo' have been "
            "dropped." % __name__)
        self._reset_messages()
        # removed arg supplied positionally: same warning
        rv = deprecated_all(42)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of %s.deprecated_all are "
            "deprecated. The value(s) provided for 'foo' have been "
            "dropped." % __name__)
        self._reset_messages()
        # both removed args supplied (keyword and positional forms)
        rv = deprecated_all(foo=42, bar=47)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of %s.deprecated_all are "
            "deprecated. The value(s) provided for 'foo', 'bar' have been "
            "dropped." % __name__)
        self._reset_messages()
        rv = deprecated_all(42, 47)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of %s.deprecated_all are "
            "deprecated. The value(s) provided for 'foo', 'bar' have been "
            "dropped." % __name__)
        self._reset_messages()
        # deprecated_all2 keeps 'foo'; only the trailing 'bar' was removed
        rv = deprecated_all2(foo=42)
        self.assertEqual(rv, 42)
        self.assertNoDeprecation()
        rv = deprecated_all2(42)
        self.assertEqual(rv, 42)
        self.assertNoDeprecation()
        rv = deprecated_all2(42, bar=47)
        self.assertEqual(rv, 42)
        self.assertDeprecation(
            "The trailing arguments ('bar') of %s.deprecated_all2 are "
            "deprecated. The value(s) provided for 'bar' have been "
            "dropped." % __name__)
        self._reset_messages()
    def test_method_remove_last_args(self):
        """Test @remove_last_args on methods."""
        f = DeprecatedMethodClass()
        # No trailing args supplied: silent.
        rv = f.deprecated_all()
        self.assertEqual(rv, None)
        self.assertNoDeprecation()
        # A removed arg (keyword or positional) warns, naming only the
        # values actually provided and dropped.
        rv = f.deprecated_all(foo=42)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of "
            "%s.DeprecatedMethodClass.deprecated_all are deprecated. "
            "The value(s) provided for 'foo' have been dropped." % __name__)
        self._reset_messages()
        rv = f.deprecated_all(42)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of "
            "%s.DeprecatedMethodClass.deprecated_all are deprecated. "
            "The value(s) provided for 'foo' have been dropped." % __name__)
        self._reset_messages()
        rv = f.deprecated_all(foo=42, bar=47)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of "
            "%s.DeprecatedMethodClass.deprecated_all are deprecated. The "
            "value(s) provided for 'foo', 'bar' have been dropped." % __name__)
        self._reset_messages()
        rv = f.deprecated_all(42, 47)
        self.assertEqual(rv, None)
        self.assertDeprecation(
            "The trailing arguments ('foo', 'bar') of "
            "%s.DeprecatedMethodClass.deprecated_all are deprecated. The "
            "value(s) provided for 'foo', 'bar' have been dropped." % __name__)
        self._reset_messages()
        # deprecated_all2 keeps 'foo'; only the trailing 'bar' is removed.
        rv = f.deprecated_all2(foo=42)
        self.assertEqual(rv, 42)
        self.assertNoDeprecation()
        rv = f.deprecated_all2(42)
        self.assertEqual(rv, 42)
        self.assertNoDeprecation()
        rv = f.deprecated_all2(42, bar=47)
        self.assertEqual(rv, 42)
        self.assertDeprecation(
            "The trailing arguments ('bar') of "
            "%s.DeprecatedMethodClass.deprecated_all2 are deprecated. "
            "The value(s) provided for 'bar' have been dropped." % __name__)
def test_remove_last_args_invalid(self):
self.assertRaisesRegex(
TypeError,
r"(deprecated_all2\(\) missing 1 required positional argument: 'foo'|" # Python 3
"deprecated_all2\(\) takes exactly 1 argument \(0 given\))", # Python 2
deprecated_all2)
self.assertRaisesRegex(
TypeError,
r"deprecated_all2\(\) got an unexpected keyword argument 'hello'",
deprecated_all2,
hello='world')
self.assertRaisesRegex(
TypeError,
r'deprecated_all2\(\) takes (exactly )?1 (positional )?argument'
' (but 2 were given|\(2 given\))',
deprecated_all2,
1, 2, 3)
f = DeprecatedMethodClass()
self.assertRaisesRegex(
TypeError,
r"(deprecated_all2\(\) missing 1 required positional argument: 'foo'|" # Python 3
"deprecated_all2\(\) takes exactly 2 arguments \(1 given\))", # Python 2
f.deprecated_all2)
self.assertRaisesRegex(
TypeError,
r"deprecated_all2\(\) got an unexpected keyword argument 'hello'",
f.deprecated_all2,
hello='world')
self.assertRaisesRegex(
TypeError,
r'deprecated_all2\(\) takes (exactly )?2 (positional )?arguments '
'(but 3 were given|\(3 given\))',
f.deprecated_all2,
1, 2, 3)
def test_deprecated_instance_method_zero_arg(self):
"""Test @deprecate_arg with classes, without arguments."""
f = DeprecatedMethodClass()
rv = f.deprecated_instance_method_arg()
self.assertEqual(rv, None)
self.assertEqual(f.foo, None)
self.assertNoDeprecation()
    def test_deprecated_instance_method_arg(self):
        """Test @deprecate_arg with instance methods."""
        f = DeprecatedMethodClass()
        # New-style argument (positional): no deprecation.
        rv = f.deprecated_instance_method_arg('a')
        self.assertEqual(rv, 'a')
        self.assertEqual(f.foo, 'a')
        self.assertNoDeprecation()
        # Old keyword 'bah': value forwarded to 'foo', one deprecation.
        rv = f.deprecated_instance_method_arg(bah='b')
        self.assertEqual(rv, 'b')
        self.assertEqual(f.foo, 'b')
        self.assertOneDeprecationParts(
            'bah argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_arg', 'foo')
        self._reset_messages()
        # New keyword 'foo': no deprecation.
        rv = f.deprecated_instance_method_arg(foo=1)
        self.assertEqual(rv, 1)
        self.assertEqual(f.foo, 1)
        self.assertNoDeprecation()
    def test_deprecated_instance_method_args(self):
        """Test @deprecate_arg with instance methods and two args."""
        f = DeprecatedMethodClass()
        # Both args by new (positional) names: silent.
        rv = f.deprecated_instance_method_args('a', 'b')
        self.assertEqual(rv, ('a', 'b'))
        self.assertNoDeprecation()
        # Both old keywords: one deprecation per renamed argument.
        rv = f.deprecated_instance_method_args(bah='b', bah2='c')
        self.assertEqual(rv, ('b', 'c'))
        self.assertDeprecationParts(
            'bah argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args', 'foo')
        self.assertDeprecationParts(
            'bah2 argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args', 'foo2')
        self._reset_messages()
        # Mixing old and new keywords warns only for the old one.
        rv = f.deprecated_instance_method_args(foo='b', bah2='c')
        self.assertEqual(rv, ('b', 'c'))
        self.assertOneDeprecationParts(
            'bah2 argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args', 'foo2')
        rv = f.deprecated_instance_method_args(foo2='c', bah='b')
        self.assertEqual(rv, ('b', 'c'))
        self.assertOneDeprecationParts(
            'bah argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args', 'foo')
        # Both new keywords: silent.
        rv = f.deprecated_instance_method_args(foo=1, foo2=2)
        self.assertEqual(rv, (1, 2))
        self.assertNoDeprecation()
    def test_deprecated_instance_method_args_multi(self):
        """Test @deprecated_args with instance methods and two args."""
        f = DeprecatedMethodClass()
        # Both args by new (positional) names: silent.
        rv = f.deprecated_instance_method_args_multi('a', 'b')
        self.assertEqual(rv, ('a', 'b'))
        self.assertNoDeprecation()
        # Both old keywords: one deprecation per renamed argument.
        rv = f.deprecated_instance_method_args_multi(bah='b', bah2='c')
        self.assertEqual(rv, ('b', 'c'))
        self.assertDeprecationParts(
            'bah argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args_multi', 'foo')
        self.assertDeprecationParts(
            'bah2 argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args_multi', 'foo2')
        self._reset_messages()
        # Mixing old and new keywords warns only for the old one.
        rv = f.deprecated_instance_method_args_multi(foo='b', bah2='c')
        self.assertEqual(rv, ('b', 'c'))
        self.assertOneDeprecationParts(
            'bah2 argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args_multi', 'foo2')
        rv = f.deprecated_instance_method_args_multi(foo2='c', bah='b')
        self.assertEqual(rv, ('b', 'c'))
        self.assertOneDeprecationParts(
            'bah argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_args_multi', 'foo')
        # Both new keywords: silent.
        rv = f.deprecated_instance_method_args_multi(foo=1, foo2=2)
        self.assertEqual(rv, (1, 2))
        self.assertNoDeprecation()
    def test_deprecated_instance_method_and_arg(self):
        """Test @deprecate_arg and @deprecated with instance methods."""
        f = DeprecatedMethodClass()
        # Positional call: only the method deprecation fires.
        rv = f.deprecated_instance_method_and_arg('a')
        self.assertEqual(rv, 'a')
        self.assertEqual(f.foo, 'a')
        self.assertOneDeprecationParts(
            __name__ + '.DeprecatedMethodClass.deprecated_instance_method_and_arg')
        # Old keyword: both the method and the argument deprecations fire.
        rv = f.deprecated_instance_method_and_arg(bah='b')
        self.assertEqual(rv, 'b')
        self.assertEqual(f.foo, 'b')
        self.assertDeprecationParts(
            __name__ + '.DeprecatedMethodClass.deprecated_instance_method_and_arg')
        self.assertDeprecationParts(
            'bah argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_and_arg', 'foo')
        self._reset_messages()
        # New keyword: again only the method deprecation.
        rv = f.deprecated_instance_method_and_arg(foo=1)
        self.assertEqual(rv, 1)
        self.assertEqual(f.foo, 1)
        self.assertOneDeprecationParts(
            __name__ + '.DeprecatedMethodClass.deprecated_instance_method_and_arg')
    def test_deprecated_instance_method_and_arg2(self):
        """Test @deprecated and @deprecate_arg with instance methods.

        Same as test_deprecated_instance_method_and_arg but with the two
        decorators applied in the opposite order.
        """
        f = DeprecatedMethodClass()
        # Positional call: only the method deprecation fires.
        rv = f.deprecated_instance_method_and_arg2('a')
        self.assertEqual(rv, 'a')
        self.assertEqual(f.foo, 'a')
        self.assertOneDeprecationParts(
            __name__ + '.DeprecatedMethodClass.deprecated_instance_method_and_arg2')
        # Old keyword: both the method and the argument deprecations fire.
        rv = f.deprecated_instance_method_and_arg2(bah='b')
        self.assertEqual(rv, 'b')
        self.assertEqual(f.foo, 'b')
        self.assertDeprecationParts(
            __name__ + '.DeprecatedMethodClass.deprecated_instance_method_and_arg2')
        self.assertDeprecationParts(
            'bah argument of ' + __name__ + '.DeprecatedMethodClass.'
            'deprecated_instance_method_and_arg2', 'foo')
        self._reset_messages()
        # New keyword: again only the method deprecation.
        rv = f.deprecated_instance_method_and_arg2(foo=1)
        self.assertEqual(rv, 1)
        self.assertEqual(f.foo, 1)
        self.assertOneDeprecationParts(
            __name__ + '.DeprecatedMethodClass.deprecated_instance_method_and_arg2')
# Allow running this test module directly; swallow unittest's SystemExit
# so interactive runs (e.g. from an interpreter session) are not killed.
if __name__ == '__main__':
    try:
        unittest.main()
    except SystemExit:
        pass
| |
"""Climate data pre-processing"""
# Built ins
import logging
from distutils.version import LooseVersion
import warnings
# External libs
import cftime
import numpy as np
import netCDF4
import xarray as xr
# Locals
from oggm import cfg
from oggm import utils
from oggm import entity_task
from oggm.exceptions import InvalidParamsError
# Module logger
log = logging.getLogger(__name__)
@entity_task(log, writes=['gcm_data'])
def process_gcm_data(gdir, filesuffix='', prcp=None, temp=None,
                     year_range=('1961', '1990'), scale_stddev=True,
                     time_unit=None, calendar=None, source=''):
    """ Applies the anomaly method to GCM climate data

    This function can be applied to any GCM data, if it is provided in a
    suitable :py:class:`xarray.DataArray`. See Parameter description for
    format details.

    For CESM-LME a specific function :py:func:`tasks.process_cesm_data` is
    available which does the preprocessing of the data and subsequently calls
    this function.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    filesuffix : str
        append a suffix to the filename (useful for ensemble experiments).
    prcp : :py:class:`xarray.DataArray`
        | monthly total precipitation [mm month-1]
        | Coordinates:
        | lat float64
        | lon float64
        | time: cftime object
    temp : :py:class:`xarray.DataArray`
        | monthly temperature [K]
        | Coordinates:
        | lat float64
        | lon float64
        | time cftime object
    year_range : tuple of str
        the year range for which you want to compute the anomalies. Default
        is `('1961', '1990')`
    scale_stddev : bool
        whether or not to scale the temperature standard deviation as well
    time_unit : str
        The unit conversion for NetCDF files. It must be adapted to the
        length of the time series. The default is to choose
        it ourselves based on the starting year.
        For example: 'days since 0850-01-01 00:00:00'
    calendar : str
        If you use an exotic calendar (e.g. 'noleap')
    source : str
        For metadata: the source of the climate data
    """
    # Standard sanity checks
    months = temp['time.month']
    if months[0] != 1:
        raise ValueError('We expect the files to start in January!')
    if months[-1] < 10:
        raise ValueError('We expect the files to end in December!')
    if (np.abs(temp['lon']) > 180) or (np.abs(prcp['lon']) > 180):
        raise ValueError('We expect the longitude coordinates to be within '
                         '[-180, 180].')
    # from normal years to hydrological years
    # sm is the starting month of the hydrological year; the slice drops
    # the incomplete year at each end so the series stays full-year.
    sm = cfg.PARAMS['hydro_month_' + gdir.hemisphere]
    if sm != 1:
        prcp = prcp[sm-1:sm-13].load()
        temp = temp[sm-1:sm-13].load()
    assert len(prcp) // 12 == len(prcp) / 12, 'Somehow we didn\'t get full years'
    assert len(temp) // 12 == len(temp) / 12, 'Somehow we didn\'t get full years'
    # Get the reference data to apply the anomaly to
    fpath = gdir.get_filepath('climate_historical')
    with xr.open_dataset(fpath) as ds_ref:
        ds_ref = ds_ref.sel(time=slice(*year_range))
        # compute monthly anomalies
        # of temp
        if scale_stddev:
            # This is a bit more arithmetic: the per-month standard
            # deviation of the GCM temperature is rescaled to the one of
            # the reference climate over year_range.
            ts_tmp_sel = temp.sel(time=slice(*year_range))
            if len(ts_tmp_sel) // 12 != len(ts_tmp_sel) / 12:
                raise InvalidParamsError('year_range cannot contain the first'
                                         'or last calendar year in the series')
            if ((len(ts_tmp_sel) // 12) % 2) == 1:
                raise InvalidParamsError('We need an even number of years '
                                         'for this to work')
            ts_tmp_std = ts_tmp_sel.groupby('time.month').std(dim='time')
            std_fac = ds_ref.temp.groupby('time.month').std(dim='time') / ts_tmp_std
            if sm != 1:
                # Just to avoid useless roll
                # (the month coordinate starts at sm after the hydro-year
                # shift above; re-align it to the data ordering)
                std_fac = std_fac.roll(month=13-sm, roll_coords=True)
            std_fac = np.tile(std_fac.data, len(temp) // 12)
            # We need an even number of years for this to work
            win_size = len(ts_tmp_sel) + 1

            def roll_func(x, axis=None):
                # x holds one centered rolling window per row; keep only
                # the same calendar month (stride 12), then scale the
                # center value's deviation from the window mean.
                x = x[:, ::12]
                n = len(x[0, :]) // 2
                xm = np.nanmean(x, axis=axis)
                return xm + (x[:, n] - xm) * std_fac

            temp = temp.rolling(time=win_size, center=True,
                                min_periods=1).reduce(roll_func)
        ts_tmp_sel = temp.sel(time=slice(*year_range))
        if len(ts_tmp_sel.time) != len(ds_ref.time):
            raise InvalidParamsError('The reference climate period and the '
                                     'GCM period after window selection do '
                                     'not match.')
        # Temperature: additive (standard) monthly anomalies.
        ts_tmp_avg = ts_tmp_sel.groupby('time.month').mean(dim='time')
        ts_tmp = temp.groupby('time.month') - ts_tmp_avg
        # of precip -- scaled anomalies
        ts_pre_avg = prcp.sel(time=slice(*year_range))
        ts_pre_avg = ts_pre_avg.groupby('time.month').mean(dim='time')
        ts_pre_ano = prcp.groupby('time.month') - ts_pre_avg
        # scaled anomalies is the default. Standard anomalies above
        # are used later for where ts_pre_avg == 0
        ts_pre = prcp.groupby('time.month') / ts_pre_avg
        # for temp
        loc_tmp = ds_ref.temp.groupby('time.month').mean()
        ts_tmp = ts_tmp.groupby('time.month') + loc_tmp
        # for prcp
        loc_pre = ds_ref.prcp.groupby('time.month').mean()
        # scaled anomalies
        ts_pre = ts_pre.groupby('time.month') * loc_pre
        # standard anomalies
        ts_pre_ano = ts_pre_ano.groupby('time.month') + loc_pre
        # Correct infinite values with standard anomalies
        # (division by a zero monthly mean above produces inf)
        ts_pre.values = np.where(np.isfinite(ts_pre.values),
                                 ts_pre.values,
                                 ts_pre_ano.values)
        # The previous step might create negative values (unlikely). Clip them
        ts_pre.values = utils.clip_min(ts_pre.values, 0)
        assert np.all(np.isfinite(ts_pre.values))
        assert np.all(np.isfinite(ts_tmp.values))
        # Persist the bias-corrected series to the glacier directory.
        gdir.write_monthly_climate_file(temp.time.values,
                                        ts_pre.values, ts_tmp.values,
                                        float(ds_ref.ref_hgt),
                                        prcp.lon.values, prcp.lat.values,
                                        time_unit=time_unit,
                                        calendar=calendar,
                                        file_name='gcm_data',
                                        source=source,
                                        filesuffix=filesuffix)
@entity_task(log, writes=['gcm_data'])
def process_cesm_data(gdir, filesuffix='', fpath_temp=None, fpath_precc=None,
                      fpath_precl=None, **kwargs):
    """Processes and writes CESM climate data for this glacier.

    This function is made for interpolating the Community
    Earth System Model Last Millennium Ensemble (CESM-LME) climate simulations,
    from Otto-Bliesner et al. (2016), to the high-resolution CL2 climatologies
    (provided with OGGM) and writes everything to a NetCDF file.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    filesuffix : str
        append a suffix to the filename (useful for ensemble experiments).
    fpath_temp : str
        path to the temp file (default: cfg.PATHS['cesm_temp_file'])
    fpath_precc : str
        path to the precc file (default: cfg.PATHS['cesm_precc_file'])
    fpath_precl : str
        path to the precl file (default: cfg.PATHS['cesm_precl_file'])
    **kwargs: any kwarg to be passed to ref:`process_gcm_data`
    """
    # CESM temperature and precipitation data
    if fpath_temp is None:
        if not ('cesm_temp_file' in cfg.PATHS):
            raise ValueError("Need to set cfg.PATHS['cesm_temp_file']")
        fpath_temp = cfg.PATHS['cesm_temp_file']
    if fpath_precc is None:
        if not ('cesm_precc_file' in cfg.PATHS):
            raise ValueError("Need to set cfg.PATHS['cesm_precc_file']")
        fpath_precc = cfg.PATHS['cesm_precc_file']
    if fpath_precl is None:
        if not ('cesm_precl_file' in cfg.PATHS):
            raise ValueError("Need to set cfg.PATHS['cesm_precl_file']")
        fpath_precl = cfg.PATHS['cesm_precl_file']
    # read the files
    if LooseVersion(xr.__version__) < LooseVersion('0.11'):
        raise ImportError('This task needs xarray v0.11 or newer to run.')
    tempds = xr.open_dataset(fpath_temp)
    precpcds = xr.open_dataset(fpath_precc)
    preclpds = xr.open_dataset(fpath_precl)
    # Get the time right - i.e. from time bounds
    # Fix for https://github.com/pydata/xarray/issues/2565
    with utils.ncDataset(fpath_temp, mode='r') as nc:
        time_unit = nc.variables['time'].units
        calendar = nc.variables['time'].calendar
        try:
            # xarray v0.11
            time = netCDF4.num2date(tempds.time_bnds[:, 0], time_unit,
                                    calendar=calendar)
        except TypeError:
            # xarray > v0.11
            time = tempds.time_bnds[:, 0].values
    # select for location
    lon = gdir.cenlon
    lat = gdir.cenlat
    # CESM files are in 0-360
    if lon <= 0:
        lon += 360
    # take the closest
    # Should we consider GCM interpolation?
    # PRECC + PRECL are summed into total precipitation (presumably the
    # convective and large-scale components -- verify against CESM docs).
    temp = tempds.TREFHT.sel(lat=lat, lon=lon, method='nearest')
    prcp = (precpcds.PRECC.sel(lat=lat, lon=lon, method='nearest') +
            preclpds.PRECL.sel(lat=lat, lon=lon, method='nearest'))
    temp['time'] = time
    prcp['time'] = time
    # Back to the [-180, 180] convention expected by process_gcm_data.
    temp.lon.values = temp.lon if temp.lon <= 180 else temp.lon - 360
    prcp.lon.values = prcp.lon if prcp.lon <= 180 else prcp.lon - 360
    # Convert m s-1 to mm mth-1
    if time[0].month != 1:
        raise ValueError('We expect the files to start in January!')
    ny, r = divmod(len(time), 12)
    assert r == 0
    ndays = np.tile(cfg.DAYS_IN_MONTH, ny)
    prcp = prcp * ndays * (60 * 60 * 24 * 1000)
    tempds.close()
    precpcds.close()
    preclpds.close()
    # Here:
    # - time_unit='days since 0850-01-01 00:00:00'
    # - calendar='noleap'
    process_gcm_data(gdir, filesuffix=filesuffix, prcp=prcp, temp=temp,
                     time_unit=time_unit, calendar=calendar, **kwargs)
@entity_task(log, writes=['gcm_data'])
def process_cmip5_data(*args, **kwargs):
    """Renamed to process_cmip_data.

    Deprecated backward-compatibility shim: emits a FutureWarning and
    delegates everything to :py:func:`process_cmip_data`.
    """
    warnings.warn('The task `process_cmip5_data` is deprecated and renamed '
                  'to `process_cmip_data`.', FutureWarning)
    # Forward the delegate's return value so the shim stays transparent
    # (the original silently discarded it).
    return process_cmip_data(*args, **kwargs)
@entity_task(log, writes=['gcm_data'])
def process_cmip_data(gdir, filesuffix='', fpath_temp=None,
                      fpath_precip=None, **kwargs):
    """Read, process and store the CMIP5 and CMIP6 climate data for this glacier.

    It stores the data in a format that can be used by the OGGM mass balance
    model and in the glacier directory.

    Currently, this function is built for the CMIP5 and CMIP6 projection
    simulations that are on the OGGM servers.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    filesuffix : str
        append a suffix to the filename (useful for ensemble experiments).
    fpath_temp : str
        path to the temp file (default: cfg.PATHS['cmip5_temp_file'])
    fpath_precip : str
        path to the precip file (default: cfg.PATHS['cmip5_precip_file'])
    **kwargs: any kwarg to be passed to ref:`process_gcm_data`
    """
    # Get the path of GCM temperature & precipitation data
    if fpath_temp is None:
        if not ('cmip5_temp_file' in cfg.PATHS):
            raise ValueError("Need to set cfg.PATHS['cmip5_temp_file']")
        fpath_temp = cfg.PATHS['cmip5_temp_file']
    if fpath_precip is None:
        if not ('cmip5_precip_file' in cfg.PATHS):
            raise ValueError("Need to set cfg.PATHS['cmip5_precip_file']")
        fpath_precip = cfg.PATHS['cmip5_precip_file']
    # Glacier location
    glon = gdir.cenlon
    glat = gdir.cenlat
    # Read the GCM files
    with xr.open_dataset(fpath_temp, use_cftime=True) as tempds, \
            xr.open_dataset(fpath_precip, use_cftime=True) as precipds:
        # Check longitude conventions: some GCM files use [0, 360]
        if tempds.lon.min() >= 0 and glon <= 0:
            glon += 360
        # Take the closest to the glacier
        # Should we consider GCM interpolation?
        temp = tempds.tas.sel(lat=glat, lon=glon, method='nearest')
        precip = precipds.pr.sel(lat=glat, lon=glon, method='nearest')
        # Back to [-180, 180] for OGGM
        temp.lon.values = temp.lon if temp.lon <= 180 else temp.lon - 360
        precip.lon.values = precip.lon if precip.lon <= 180 else precip.lon - 360
        # Convert kg m-2 s-1 to mm mth-1 => 1 kg m-2 = 1 mm !!!
        assert 'kg m-2 s-1' in precip.units, 'Precip units not understood'
        ny, r = divmod(len(temp), 12)
        assert r == 0
        # Seconds per calendar month for each timestep.
        dimo = [cfg.DAYS_IN_MONTH[m - 1] for m in temp['time.month']]
        precip = precip * dimo * (60 * 60 * 24)
    # Apply the anomaly method against the reference climate and store.
    process_gcm_data(gdir, filesuffix=filesuffix, prcp=precip, temp=temp,
                     source=filesuffix, **kwargs)
@entity_task(log, writes=['gcm_data'])
def process_lmr_data(gdir, fpath_temp=None, fpath_precip=None,
                     year_range=('1951', '1980'), filesuffix='', **kwargs):
    """Read, process and store the Last Millennium Reanalysis (LMR) data for this glacier.

    LMR data: https://atmos.washington.edu/~hakim/lmr/LMRv2/

    LMR data is annualised in anomaly format relative to 1951-1980. We
    create synthetic timeseries from the reference data.

    It stores the data in a format that can be used by the OGGM mass balance
    model and in the glacier directory.

    Parameters
    ----------
    gdir : :py:class:`oggm.GlacierDirectory`
        where to write the data
    fpath_temp : str
        path to the temp file (default: LMR v2.1 from server above)
    fpath_precip : str
        path to the precip file (default: LMR v2.1 from server above)
    year_range : tuple of str
        the year range for which you want to compute the anomalies. Default
        for LMR is `('1951', '1980')`
    filesuffix : str
        append a suffix to the filename (useful for ensemble experiments).
    **kwargs: any kwarg to be passed to ref:`process_gcm_data`
    """
    # Get the path of GCM temperature & precipitation data
    base_url = 'https://atmos.washington.edu/%7Ehakim/lmr/LMRv2/'
    if fpath_temp is None:
        with utils.get_lock():
            fpath_temp = utils.file_downloader(base_url + 'air_MCruns_ensemble_mean_LMRv2.1.nc')
    if fpath_precip is None:
        with utils.get_lock():
            fpath_precip = utils.file_downloader(
                base_url + 'prate_MCruns_ensemble_mean_LMRv2.1.nc')
    # Glacier location
    glon = gdir.cenlon
    glat = gdir.cenlat
    # Read the GCM files
    with xr.open_dataset(fpath_temp, use_cftime=True) as tempds, \
            xr.open_dataset(fpath_precip, use_cftime=True) as precipds:
        # Check longitude conventions: some files use [0, 360]
        if tempds.lon.min() >= 0 and glon <= 0:
            glon += 360
        # Take the closest to the glacier
        # Should we consider GCM interpolation?
        temp = tempds.air.sel(lat=glat, lon=glon, method='nearest')
        precip = precipds.prate.sel(lat=glat, lon=glon, method='nearest')
        # Currently we just take the mean of the ensemble, although
        # this is probably not advised. The GCM climate will correct
        # anyways
        temp = temp.mean(dim='MCrun')
        precip = precip.mean(dim='MCrun')
        # Precip unit is kg/m^2/s we convert to mm month since we apply the anomaly after
        precip = precip * 30.5 * (60 * 60 * 24)
        # Back to [-180, 180] for OGGM
        temp.lon.values = temp.lon if temp.lon <= 180 else temp.lon - 360
        precip.lon.values = precip.lon if precip.lon <= 180 else precip.lon - 360
        # OK now we have to turn these annual timeseries in monthly data
        # We take the ref climate
        fpath = gdir.get_filepath('climate_historical')
        with xr.open_dataset(fpath) as ds_ref:
            ds_ref = ds_ref.sel(time=slice(*year_range))
            # Monthly climatology of the reference data over year_range.
            loc_tmp = ds_ref.temp.groupby('time.month').mean()
            loc_pre = ds_ref.prcp.groupby('time.month').mean()
            # Make time coord
            # (noleap month lengths, cumulated to day offsets from year 0)
            t = np.cumsum([31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] * len(temp))
            t = cftime.num2date(np.append([0], t[:-1]), 'days since 0000-01-01 00:00:00',
                                calendar='noleap')
            # Broadcast the annual anomaly onto the monthly climatology.
            temp = xr.DataArray((loc_tmp.data + temp.data[:, np.newaxis]).flatten(),
                                coords={'time': t, 'lon': temp.lon, 'lat': temp.lat},
                                dims=('time',))
            # For precip the std dev is very small - lets keep it as is for now but
            # this is a bit ridiculous. We clip to zero here to be sure
            precip = utils.clip_min((loc_pre.data + precip.data[:, np.newaxis]).flatten(), 0)
            precip = xr.DataArray(precip, dims=('time',),
                                  coords={'time': t, 'lon': temp.lon, 'lat': temp.lat})
            process_gcm_data(gdir, filesuffix=filesuffix, prcp=precip, temp=temp,
                             year_range=year_range, calendar='noleap',
                             source='lmr', **kwargs)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic return type used by the per-call `cls` callbacks below.
T = TypeVar('T')
# Alias for JSON-serializable request bodies.
JSONType = Any
# Optional per-call callback: receives the raw pipeline response, the
# deserialized body and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Serializer shared by the module-level build_* request helpers;
# client-side validation is disabled for this generated client.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request of the create-or-update operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-09-30"
    accept = "application/json"

    # Resolve the URL template with the serialized path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryImageName=_SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
    )

    # Query string: only the API version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: optional Content-Type plus Accept.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PATCH request of the update operation."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2020-09-30"
    accept = "application/json"

    # Resolve the URL template with the serialized path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryImageName=_SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
    )

    # Query string: only the API version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: optional Content-Type plus Accept.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single gallery image definition."""
    api_version = "2020-09-30"
    accept = "application/json"

    # Resolve the URL template with the serialized path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryImageName=_SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
    )

    # Query string: only the API version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Accept only (no body on GET).
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    gallery_image_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request of the delete operation."""
    api_version = "2020-09-30"
    accept = "application/json"

    # Resolve the URL template with the serialized path arguments.
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
        galleryImageName=_SERIALIZER.url("gallery_image_name", gallery_image_name, 'str'),
    )

    # Query string: only the API version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Accept only (no body on DELETE).
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_gallery_request(
    subscription_id: str,
    resource_group_name: str,
    gallery_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing all image definitions in a gallery."""
    api_version = "2020-09-30"
    accept = "application/json"

    # Resolve the URL template with the serialized path arguments
    # (no image name: this endpoint addresses the whole gallery).
    url = _format_url_section(
        kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        galleryName=_SERIALIZER.url("gallery_name", gallery_name, 'str'),
    )

    # Query string: only the API version.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers: Accept only.
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class GalleryImagesOperations(object):
"""GalleryImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        gallery_name: str,
        gallery_image_name: str,
        gallery_image: "_models.GalleryImage",
        **kwargs: Any
    ) -> "_models.GalleryImage":
        """Send the initial PUT of the create-or-update long-running
        operation and return the GalleryImage deserialized from the first
        response (200, 201 or 202). Any other status is mapped to the
        corresponding Azure error and raised.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.GalleryImage"]
        # Map auth/not-found/conflict statuses to specific exception types;
        # callers may extend/override via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the model to the JSON request body.
        _json = self._serialize.body(gallery_image, 'GalleryImage')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            gallery_name=gallery_name,
            gallery_image_name=gallery_image_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Run the request through the client pipeline synchronously.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # All accepted statuses carry a GalleryImage body.
        if response.status_code == 200:
            deserialized = self._deserialize('GalleryImage', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('GalleryImage', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('GalleryImage', pipeline_response)
        # Give the optional per-call callback the raw response as well.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'}  # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImage",
**kwargs: Any
) -> LROPoller["_models.GalleryImage"]:
"""Create or update a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be created.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be created or updated.
The allowed characters are alphabets and numbers with dots, dashes, and periods allowed in the
middle. The maximum length is 80 characters.
:type gallery_image_name: str
:param gallery_image: Parameters supplied to the create or update gallery image operation.
:type gallery_image: ~azure.mgmt.compute.v2020_09_30.models.GalleryImage
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryImage or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_09_30.models.GalleryImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image=gallery_image,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImageUpdate",
**kwargs: Any
) -> "_models.GalleryImage":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(gallery_image, 'GalleryImageUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image: "_models.GalleryImageUpdate",
**kwargs: Any
) -> LROPoller["_models.GalleryImage"]:
"""Update a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be updated.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be updated. The allowed
characters are alphabets and numbers with dots, dashes, and periods allowed in the middle. The
maximum length is 80 characters.
:type gallery_image_name: str
:param gallery_image: Parameters supplied to the update gallery image operation.
:type gallery_image: ~azure.mgmt.compute.v2020_09_30.models.GalleryImageUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either GalleryImage or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2020_09_30.models.GalleryImage]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image=gallery_image,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> "_models.GalleryImage":
"""Retrieves information about a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which the Image Definitions are
to be retrieved.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be retrieved.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.GalleryImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Delete a gallery image.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition is to
be deleted.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition to be deleted.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}'} # type: ignore
@distributed_trace
def list_by_gallery(
self,
resource_group_name: str,
gallery_name: str,
**kwargs: Any
) -> Iterable["_models.GalleryImageList"]:
"""List gallery image definitions in a gallery.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery from which Image Definitions are to
be listed.
:type gallery_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryImageList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2020_09_30.models.GalleryImageList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=self.list_by_gallery.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_gallery_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
gallery_name=gallery_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("GalleryImageList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_gallery.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images'} # type: ignore
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class EndpointsOperations(object):
"""EndpointsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def list_by_profile(
        self, resource_group_name, profile_name, custom_headers=None, raw=False, **operation_config):
    """Lists existing CDN endpoints.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`EndpointPaged <azure.mgmt.cdn.models.EndpointPaged>`
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    def internal_paging(next_link=None, raw=False):
        # Page fetcher handed to EndpointPaged: the first call builds the
        # collection URL; later calls follow the service-supplied next_link.
        if not next_link:
            # Construct URL
            url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints'
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
                'profileName': self._serialize.url("profile_name", profile_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        else:
            # next_link already embeds its own query string.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Correlation id for server-side request tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)

        return response

    # Deserialize response: EndpointPaged drives internal_paging lazily.
    deserialized = models.EndpointPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        # NOTE(review): header_dict is created empty and never populated
        # before being handed to EndpointPaged -- looks like generated-code
        # boilerplate; confirm before relying on response headers here.
        header_dict = {}
        client_raw_response = models.EndpointPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
def get(
        self, resource_group_name, profile_name, endpoint_name, custom_headers=None, raw=False, **operation_config):
    """Gets an existing CDN endpoint with the specified endpoint name under
    the specified subscription, resource group and profile.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`Endpoint <azure.mgmt.cdn.models.Endpoint>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Expand the operation's URL template with serialized path arguments.
    template = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}'
    url_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    full_url = self._client.format_url(template, **url_args)

    # The query string carries only the API version.
    params = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Assemble request headers: fixed content type, optional correlation id,
    # caller-supplied extras, and the configured accept-language.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Send the GET and validate the status code.
    http_request = self._client.get(full_url, params)
    http_response = self._client.send(http_request, headers, **operation_config)

    if http_response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, http_response)

    result = None
    if http_response.status_code == 200:
        result = self._deserialize('Endpoint', http_response)

    if raw:
        # Caller asked for the transport response alongside the model.
        return ClientRawResponse(result, http_response)

    return result
def create(
        self, resource_group_name, profile_name, endpoint_name, endpoint, custom_headers=None, raw=False, **operation_config):
    """Creates a new CDN endpoint with the specified endpoint name under the
    specified subscription, resource group and profile.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param endpoint: Endpoint properties
    :type endpoint: :class:`Endpoint <azure.mgmt.cdn.models.Endpoint>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`Endpoint
     <azure.mgmt.cdn.models.Endpoint>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id for server-side request tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body: serialize once, up front, so the closures below can
    # reuse it on every (re)send.
    body_content = self._serialize.body(endpoint, 'Endpoint')

    # Construct and send request
    def long_running_send():
        # Initial PUT that starts the long-running create.
        request = self._client.put(url, query_parameters)
        return self._client.send(
            request, header_parameters, body_content, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the operation-status URL returned by the service.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # Deserialize the terminal response; 200/201/202 all carry an
        # Endpoint body.
        if response.status_code not in [200, 201, 202]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Endpoint', response)
        if response.status_code == 201:
            deserialized = self._deserialize('Endpoint', response)
        if response.status_code == 202:
            deserialized = self._deserialize('Endpoint', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    if raw:
        # raw=True bypasses polling entirely: send once and return the
        # immediate result.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def update(
        self, resource_group_name, profile_name, endpoint_name, endpoint_update_properties, custom_headers=None, raw=False, **operation_config):
    """Updates an existing CDN endpoint with the specified endpoint name under
    the specified subscription, resource group and profile. Only tags and
    Origin HostHeader can be updated after creating an endpoint. To update
    origins, use the Update Origin operation. To update custom domains, use
    the Update Custom Domain operation.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param endpoint_update_properties: Endpoint update properties
    :type endpoint_update_properties: :class:`EndpointUpdateParameters
     <azure.mgmt.cdn.models.EndpointUpdateParameters>`
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`Endpoint
     <azure.mgmt.cdn.models.Endpoint>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Construct URL
    url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}'
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
        'profileName': self._serialize.url("profile_name", profile_name, 'str'),
        'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        # Correlation id for server-side request tracing.
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body: serialize once so the closures below can reuse it.
    body_content = self._serialize.body(endpoint_update_properties, 'EndpointUpdateParameters')

    # Construct and send request
    def long_running_send():
        # Initial PATCH that starts the long-running update.
        request = self._client.patch(url, query_parameters)
        return self._client.send(
            request, header_parameters, body_content, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the operation-status URL returned by the service.
        request = self._client.get(status_link)
        if headers:
            request.headers.update(headers)
        return self._client.send(
            request, header_parameters, **operation_config)

    def get_long_running_output(response):
        # Deserialize the terminal response; 200/202 both carry an
        # Endpoint body.
        if response.status_code not in [200, 202]:
            raise models.ErrorResponseException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('Endpoint', response)
        if response.status_code == 202:
            deserialized = self._deserialize('Endpoint', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    if raw:
        # raw=True bypasses polling entirely: send once and return the
        # immediate result.
        response = long_running_send()
        return get_long_running_output(response)

    long_running_operation_timeout = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    return AzureOperationPoller(
        long_running_send, get_long_running_output,
        get_long_running_status, long_running_operation_timeout)
def delete(
        self, resource_group_name, profile_name, endpoint_name, custom_headers=None, raw=False, **operation_config):
    """Deletes an existing CDN endpoint with the specified endpoint name under
    the specified subscription, resource group and profile.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Resolve the templated resource URL for this endpoint.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
            'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    # Query string carries only the service API version.
    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Standard JSON headers, plus optional request id / language / caller extras.
    request_headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        request_headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        request_headers.update(custom_headers)
    if self.config.accept_language is not None:
        request_headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    def long_running_send():
        # Initial DELETE that kicks off the long-running operation.
        return self._client.send(
            self._client.delete(url, query), request_headers, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until the operation settles.
        status_request = self._client.get(status_link)
        if headers:
            status_request.headers.update(headers)
        return self._client.send(
            status_request, request_headers, **operation_config)

    def get_long_running_output(response):
        # 202 (accepted) and 204 (no content) are the only success codes.
        if response.status_code not in [202, 204]:
            raise models.ErrorResponseException(self._deserialize, response)
        if raw:
            return ClientRawResponse(None, response)

    if raw:
        # Caller wants the immediate response; skip the poller entirely.
        return get_long_running_output(long_running_send())

    return AzureOperationPoller(
        long_running_send, get_long_running_output, get_long_running_status,
        operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout))
def start(
        self, resource_group_name, profile_name, endpoint_name, custom_headers=None, raw=False, **operation_config):
    """Starts an existing CDN endpoint that is on a stopped state.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`Endpoint <azure.mgmt.cdn.models.Endpoint>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Resolve the templated action URL for the /start operation.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/start',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
            'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Standard JSON headers, plus optional request id / language / caller extras.
    request_headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        request_headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        request_headers.update(custom_headers)
    if self.config.accept_language is not None:
        request_headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    def long_running_send():
        # Initial POST that kicks off the long-running start operation.
        return self._client.send(
            self._client.post(url, query), request_headers, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until the operation settles.
        status_request = self._client.get(status_link)
        if headers:
            status_request.headers.update(headers)
        return self._client.send(
            status_request, request_headers, **operation_config)

    def get_long_running_output(response):
        # 202 is the only success code; the body deserializes to an Endpoint.
        if response.status_code not in [202]:
            raise models.ErrorResponseException(self._deserialize, response)
        result = None
        if response.status_code == 202:
            result = self._deserialize('Endpoint', response)
        if raw:
            return ClientRawResponse(result, response)
        return result

    if raw:
        # Caller wants the immediate response; skip the poller entirely.
        return get_long_running_output(long_running_send())

    return AzureOperationPoller(
        long_running_send, get_long_running_output, get_long_running_status,
        operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout))
def stop(
        self, resource_group_name, profile_name, endpoint_name, custom_headers=None, raw=False, **operation_config):
    """Stops an existing running CDN endpoint.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`Endpoint <azure.mgmt.cdn.models.Endpoint>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Resolve the templated action URL for the /stop operation.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/stop',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
            'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Standard JSON headers, plus optional request id / language / caller extras.
    request_headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        request_headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        request_headers.update(custom_headers)
    if self.config.accept_language is not None:
        request_headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    def long_running_send():
        # Initial POST that kicks off the long-running stop operation.
        return self._client.send(
            self._client.post(url, query), request_headers, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until the operation settles.
        status_request = self._client.get(status_link)
        if headers:
            status_request.headers.update(headers)
        return self._client.send(
            status_request, request_headers, **operation_config)

    def get_long_running_output(response):
        # 202 is the only success code; the body deserializes to an Endpoint.
        if response.status_code not in [202]:
            raise models.ErrorResponseException(self._deserialize, response)
        result = None
        if response.status_code == 202:
            result = self._deserialize('Endpoint', response)
        if raw:
            return ClientRawResponse(result, response)
        return result

    if raw:
        # Caller wants the immediate response; skip the poller entirely.
        return get_long_running_output(long_running_send())

    return AzureOperationPoller(
        long_running_send, get_long_running_output, get_long_running_status,
        operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout))
def purge_content(
        self, resource_group_name, profile_name, endpoint_name, content_paths, custom_headers=None, raw=False, **operation_config):
    """Removes a content from CDN.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param content_paths: The path to the content to be purged. Can
     describe a file path or a wild card directory.
    :type content_paths: list of str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Wrap the plain path list in the model the service expects.
    content_file_paths = models.PurgeParameters(content_paths=content_paths)

    # Resolve the templated action URL for the /purge operation.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/purge',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
            'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Standard JSON headers, plus optional request id / language / caller extras.
    request_headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        request_headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        request_headers.update(custom_headers)
    if self.config.accept_language is not None:
        request_headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialized request body.
    payload = self._serialize.body(content_file_paths, 'PurgeParameters')

    def long_running_send():
        # Initial POST (with body) that kicks off the purge operation.
        return self._client.send(
            self._client.post(url, query), request_headers, payload, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until the operation settles.
        status_request = self._client.get(status_link)
        if headers:
            status_request.headers.update(headers)
        return self._client.send(
            status_request, request_headers, **operation_config)

    def get_long_running_output(response):
        # 202 is the only success code; the operation returns no body.
        if response.status_code not in [202]:
            raise models.ErrorResponseException(self._deserialize, response)
        if raw:
            return ClientRawResponse(None, response)

    if raw:
        # Caller wants the immediate response; skip the poller entirely.
        return get_long_running_output(long_running_send())

    return AzureOperationPoller(
        long_running_send, get_long_running_output, get_long_running_status,
        operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout))
def load_content(
        self, resource_group_name, profile_name, endpoint_name, content_paths, custom_headers=None, raw=False, **operation_config):
    """Pre-loads a content to CDN. Available for Verizon Profiles.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param content_paths: The path to the content to be loaded. Path
     should be a relative file URL of the origin.
    :type content_paths: list of str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
     :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns None
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Wrap the plain path list in the model the service expects.
    content_file_paths = models.LoadParameters(content_paths=content_paths)

    # Resolve the templated action URL for the /load operation.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/load',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
            'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Standard JSON headers, plus optional request id / language / caller extras.
    request_headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        request_headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        request_headers.update(custom_headers)
    if self.config.accept_language is not None:
        request_headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialized request body.
    payload = self._serialize.body(content_file_paths, 'LoadParameters')

    def long_running_send():
        # Initial POST (with body) that kicks off the load operation.
        return self._client.send(
            self._client.post(url, query), request_headers, payload, **operation_config)

    def get_long_running_status(status_link, headers=None):
        # Poll the service-provided status link until the operation settles.
        status_request = self._client.get(status_link)
        if headers:
            status_request.headers.update(headers)
        return self._client.send(
            status_request, request_headers, **operation_config)

    def get_long_running_output(response):
        # 202 is the only success code; the operation returns no body.
        if response.status_code not in [202]:
            raise models.ErrorResponseException(self._deserialize, response)
        if raw:
            return ClientRawResponse(None, response)

    if raw:
        # Caller wants the immediate response; skip the poller entirely.
        return get_long_running_output(long_running_send())

    return AzureOperationPoller(
        long_running_send, get_long_running_output, get_long_running_status,
        operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout))
def validate_custom_domain(
        self, resource_group_name, profile_name, endpoint_name, host_name, custom_headers=None, raw=False, **operation_config):
    """Validates the custom domain mapping to ensure it maps to the correct
    CDN endpoint in DNS.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param host_name: The host name of the custom domain. Must be a domain
     name.
    :type host_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ValidateCustomDomainOutput
     <azure.mgmt.cdn.models.ValidateCustomDomainOutput>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    # Wrap the host name in the input model the service expects.
    custom_domain_properties = models.ValidateCustomDomainInput(host_name=host_name)

    # Resolve the templated action URL for the /validateCustomDomain operation.
    url = self._client.format_url(
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/validateCustomDomain',
        **{
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
            'profileName': self._serialize.url("profile_name", profile_name, 'str'),
            'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        })

    query = {
        'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
    }

    # Standard JSON headers, plus optional request id / language / caller extras.
    request_headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        request_headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        request_headers.update(custom_headers)
    if self.config.accept_language is not None:
        request_headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the body and issue the synchronous POST.
    payload = self._serialize.body(custom_domain_properties, 'ValidateCustomDomainInput')
    response = self._client.send(
        self._client.post(url, query), request_headers, payload, **operation_config)

    # 200 is the only success code.
    if response.status_code not in [200]:
        raise models.ErrorResponseException(self._deserialize, response)

    result = None
    if response.status_code == 200:
        result = self._deserialize('ValidateCustomDomainOutput', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
def list_resource_usage(
        self, resource_group_name, profile_name, endpoint_name, custom_headers=None, raw=False, **operation_config):
    """Checks the quota and usage of geo filters and custom domains under the
    given endpoint.

    :param resource_group_name: Name of the Resource group within the
     Azure subscription.
    :type resource_group_name: str
    :param profile_name: Name of the CDN profile which is unique within
     the resource group.
    :type profile_name: str
    :param endpoint_name: Name of the endpoint under the profile which is
     unique globally.
    :type endpoint_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :rtype: :class:`ResourceUsagePaged
     <azure.mgmt.cdn.models.ResourceUsagePaged>`
    :raises:
     :class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
    """
    def internal_paging(next_link=None, raw=False):
        # First page: build the checkResourceUsage URL; subsequent pages
        # follow the service-provided next_link verbatim.
        if next_link:
            url = next_link
            query = {}
        else:
            url = self._client.format_url(
                '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Cdn/profiles/{profileName}/endpoints/{endpointName}/checkResourceUsage',
                **{
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+$'),
                    'profileName': self._serialize.url("profile_name", profile_name, 'str'),
                    'endpointName': self._serialize.url("endpoint_name", endpoint_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                })
            query = {
                'api-version': self._serialize.query("self.config.api_version", self.config.api_version, 'str'),
            }

        # Standard JSON headers, plus optional request id / language / extras.
        request_headers = {'Content-Type': 'application/json; charset=utf-8'}
        if self.config.generate_client_request_id:
            request_headers['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            request_headers.update(custom_headers)
        if self.config.accept_language is not None:
            request_headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Each page is fetched with a POST; 200 is the only success code.
        response = self._client.send(
            self._client.post(url, query), request_headers, **operation_config)
        if response.status_code not in [200]:
            raise models.ErrorResponseException(self._deserialize, response)
        return response

    # The paged collection lazily drives internal_paging as it is iterated.
    if raw:
        return models.ResourceUsagePaged(
            internal_paging, self._deserialize.dependencies, {})
    return models.ResourceUsagePaged(
        internal_paging, self._deserialize.dependencies)
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import os
import glob
import shutil
from calvin.csparser.parser import calvin_parser
from calvin.csparser.checker import check
from calvin.actorstore import store
from calvin.utilities import certificate
from calvin.actorstore.store import ActorStore
def check_script(file):
    """Parse and check a Calvin script file.

    Returns a tuple (ir, errors, warnings): ir is the intermediate
    representation (empty dict when the file could not be read), and
    errors/warnings are lists of issue dicts with 'reason', 'line', 'col'.
    """
    try:
        with open(file, 'r') as source:
            source_text = source.read()
    except (IOError, OSError):
        # Only file-access failures should be reported as a missing file.
        # The original bare `except:` swallowed *every* exception (including
        # KeyboardInterrupt) and mislabeled it "File not found".
        return {}, [{'reason': 'File not found', 'line': 0, 'col': 0}], []
    # Steps taken:
    # 1) parser .calvin file -> IR. May produce syntax errors/warnings
    # 2) checker IR -> IR. May produce syntax errors/warnings
    ir, errors, warnings = calvin_parser(source_text, file)
    # If there were errors during parsing no IR will be generated
    if not errors:
        c_errors, c_warnings = check(ir)
        errors.extend(c_errors)
        warnings.extend(c_warnings)
    return ir, errors, warnings
def install_component(namespace, name, definition, overwrite):
    """Add a component definition to the actor store under the given
    namespace; returns the store's success indication."""
    return store.ActorStore().add_component(namespace, name, definition, overwrite)
def parse_args():
    """Build the csmanage command-line interface and parse sys.argv.

    Sub-commands: install, trust, sign, ca, code_signer. Each sub-parser
    sets `func` to its manage_* handler so main can dispatch on it.
    Returns the parsed argparse.Namespace.
    """
    long_desc = """Manage the host's actor store and credentials"""
    # top level arguments
    argparser = argparse.ArgumentParser(description=long_desc)
    cmdparsers = argparser.add_subparsers(help="command help")

    # parser for install cmd
    install_commands = ['component', 'actor']
    cmd_install = cmdparsers.add_parser('install', help='install components and actors')
    cmd_install.add_argument('cmd', metavar='<command>', choices=install_commands, type=str,
                             help="one of %s" % ", ".join(install_commands))
    cmd_install.add_argument('--force', dest='force', action='store_true',
                             help='overwrite components or actor that exists at destination')
    cmd_install.add_argument('--sign', dest='sign', action='store_true',
                             help='sign actor or component')
    cmd_install.add_argument('--org', metavar='<name>', dest='org', type=str,
                             help='Code Signer org name used, assumes default location when no calvin.conf')
    cmd_install.add_argument('--namespace', metavar='<ns.sub-ns>', type=str, required=True,
                             help='namespace to install actor or components under')
    # NOTE(review): --actor, --script and the component group are all
    # required=True, so neither the 'actor' nor the 'component' sub-command
    # can be used without supplying the other's options — confirm intent
    # before relaxing, since changing it alters the CLI contract.
    aargs = cmd_install.add_argument_group("actor")
    aargs.add_argument('--actor', metavar='<path>', action='append', default=[], required=True,
                       help='actor file to install, can be repeated')
    gargs = cmd_install.add_argument_group("component")
    gargs.add_argument('--script', metavar='<path>', type=str, required=True,
                       help='script file with component definitions')
    whichcomp = gargs.add_mutually_exclusive_group(required=True)
    whichcomp.add_argument('--all', dest='component', action='store_const', const=[],
                           help='install all components found in script')
    whichcomp.add_argument('--component', metavar='<component>', type=str, nargs='+',
                           help='name of component(s) to install')
    gargs.add_argument('--issue-fmt', dest='fmt', type=str,
                       default='{issue_type}: {reason} {script} [{line}:{col}]',
                       help='custom format for issue reporting.')
    cmd_install.set_defaults(func=manage_install)

    # parser for trust cmd
    cmd_trust = cmdparsers.add_parser('trust', help='manage trusted certificates')
    etargs = cmd_trust.add_argument_group("mandatory argument")
    etargs.add_argument('--path', metavar='<path>', type=str,
                        help='certificate to trust')
    cmd_trust.add_argument('--dir', metavar='<directory>', type=str, default="",
                           help='security directory, defaults to ~/.calvin/security')
    cmd_trust.set_defaults(func=manage_trust)

    # parser for sign cmd
    # Might later need to specify what is signed to add extra verification
    # sign_commands = ['app', 'component', 'actor']
    cmd_sign = cmdparsers.add_parser('sign', help='sign a file')
    # cmd_sign.add_argument('cmd', metavar='<command>', choices=sign_commands, type=str,
    #                       help="one of %s" % ", ".join(sign_commands))
    cmd_sign.add_argument('--org', metavar='<name>', dest='org', type=str, required=True,
                          help='Code Signer org name used')
    cmd_sign.add_argument('--dir', metavar='<directory>', type=str, default="",
                          help='security directory, defaults to ~/.calvin/security')
    cmd_sign.add_argument('--file', metavar='<path>', action='append', default=[],
                          help='file to sign, can be repeated')
    storeargs = cmd_sign.add_argument_group("actor and component")
    storeargs.add_argument('--nsfile', metavar='<ns.sub-ns.actor>', action='append', default=[],
                           help='namespaced store path to actor or components, can be repeated')
    cmd_sign.set_defaults(func=manage_sign)

    # parser for CA cmd
    ca_commands = ['create', 'remove', 'export']
    cmd_ca = cmdparsers.add_parser('ca', help='manage CA')
    cmd_ca.add_argument('cmd', metavar='<command>', choices=ca_commands, type=str,
                        help="one of %s" % ", ".join(ca_commands))
    etargs = cmd_ca.add_argument_group("export")
    etargs.add_argument('--path', metavar='<path>', type=str,
                        help='export to directory')
    cargs = cmd_ca.add_argument_group("create")
    cmd_ca.add_argument('--force', dest='force', action='store_true',
                        help='overwrite file that exists at destination')
    cmd_ca.add_argument('--domain', metavar='<name>', dest='domain', type=str, required=True,
                        help='CA domain name used')
    cargs.add_argument('--name', metavar='<commonName>', type=str,
                       help='common name of Certificate Authority')
    cmd_ca.add_argument('--dir', metavar='<directory>', type=str, default="",
                        help='security directory, defaults to ~/.calvin/security')
    cmd_ca.set_defaults(func=manage_ca)

    # parser for code_signer cmd
    cs_commands = ['create', 'remove', 'export']
    cmd_cs = cmdparsers.add_parser('code_signer', help='manage Code Signer')
    cmd_cs.add_argument('cmd', metavar='<command>', choices=cs_commands, type=str,
                        help="one of %s" % ", ".join(cs_commands))
    etargs = cmd_cs.add_argument_group("export")
    etargs.add_argument('--path', metavar='<path>', type=str,
                        help='export to directory')
    cargs = cmd_cs.add_argument_group("create")
    cmd_cs.add_argument('--force', dest='force', action='store_true',
                        help='overwrite file that exists at destination')
    cmd_cs.add_argument('--org', metavar='<name>', dest='org', type=str, required=True,
                        help='Organizational name used')
    cargs.add_argument('--name', metavar='<commonName>', type=str,
                       help='common name of Code Signer')
    cmd_cs.add_argument('--dir', metavar='<directory>', type=str, default="",
                        help='security directory, defaults to ~/.calvin/security')
    cmd_cs.set_defaults(func=manage_cs)

    return argparser.parse_args()
def manage_install(args):
    """Install components from a Calvin script into the local actor store.

    Parses/checks args.script, reports warnings and errors via args.fmt on
    stderr, and installs the selected components (all of them when
    args.component is empty, i.e. --all was given). Returns 1 on any
    reported error; None (exit status 0) on success, matching the other
    manage_* handlers.
    """
    def report_issues(issues, issue_type, file=''):
        # Print issues ordered by source line using the user-supplied format.
        sorted_issues = sorted(issues, key=lambda k: k.get('line', 0))
        for issue in sorted_issues:
            sys.stderr.write(args.fmt.format(script=file, issue_type=issue_type, **issue) + '\n')

    ir, errors, warnings = check_script(args.script)
    if warnings:
        report_issues(warnings, 'Warning', args.script)
    if errors:
        report_issues(errors, 'Error', args.script)
        return 1

    errors = []
    # .get guards against an IR without a components section.
    for comp_name, comp_def in ir.get('components', {}).items():
        if args.component and comp_name not in args.component:
            continue
        # BUGFIX: the install option is declared with dest='force', so
        # args.overwrite never existed and raised AttributeError here.
        ok = install_component(args.namespace, comp_name, comp_def, args.force)
        if not ok:
            errors.append({'reason': 'Failed to install "{0}"'.format(comp_name),
                           'line': comp_def['dbg_line'], 'col': 0})
    if errors:
        report_issues(errors, 'Error', args.script)
        return 1
def manage_trust(args):
    """Copy a certificate file into the truststore so it becomes trusted.

    Uses args.dir as the security directory when given, otherwise
    ~/.calvin/security. Raises when no certificate path is supplied.
    """
    if not args.path:
        raise Exception("No path supplied")
    cert_name = os.path.basename(args.path)
    if args.dir:
        security_dir = args.dir
    else:
        security_dir = os.path.join(os.getenv("HOME"), ".calvin", "security")
    truststore_cert = os.path.join(security_dir, "trustStore", cert_name)
    truststore_dir = os.path.dirname(truststore_cert)
    if not os.path.isdir(truststore_dir):
        # Owner-only permissions on the freshly created truststore directory.
        os.makedirs(truststore_dir, 0o700)
    shutil.copy(args.path, truststore_cert)
def manage_sign(args):
    """Sign actor, component and script files with the Code Signer's key.

    Collects files from --file globs and --nsfile store paths, keeps only
    signable extensions (*.calvin, *.comp, *.py, excluding __init__.py),
    and signs each one. Signing failures are collected and reported after
    the loop so one bad file does not stop the rest.
    """
    # Collect files to sign
    files = []
    if args.file:
        for f in args.file:
            files.extend(glob.glob(f))
    if args.nsfile:
        # Renamed from 'store': the old local shadowed the imported
        # calvin.actorstore 'store' module for the rest of the function.
        actor_store = ActorStore()
        for m in args.nsfile:
            files.extend(actor_store.actor_paths(m))
    # Filter out any files not *.calvin, *.comp, *.py
    files = [f for f in files if f.endswith(('.calvin', '.comp', '.py')) and not f.endswith('__init__.py')]
    if not files:
        # BUGFIX: message said "*py" instead of "*.py".
        raise Exception("No (*.calvin, *.comp, *.py) files supplied")
    if not args.org:
        raise Exception("No org supplied")
    configfile = os.path.join(args.dir, args.org, "openssl.conf") if args.dir else None
    # When conf missing the exception is printed by main
    conf = certificate.Config(configfile=configfile, domain=args.org, readonly=True)
    exceptions = []
    for f in files:
        try:
            certificate.sign_file(conf, f)
        except Exception as e:
            exceptions.append(e)
    for e in exceptions:
        # Parenthesized so this line is valid under both Python 2 and 3.
        print("Error {}".format(e))
def manage_ca(args):
    """Create, remove or export a certificate-authority domain.

    ``args.cmd`` selects the operation:
      create -- new CA domain (requires ``domain`` and ``name``)
      remove -- delete the CA domain (requires ``domain``)
      export -- copy the CA certificate to ``args.path`` (requires ``domain`` and ``path``)

    Raises:
        Exception: when a required argument for the selected command is missing.
    """
    if args.cmd == 'create':
        # Previously a missing domain/name made 'create' a silent no-op (and
        # the inner domain check was unreachable); fail loudly instead so the
        # user knows nothing was created.
        if not args.domain:
            raise Exception("No domain supplied")
        if not args.name:
            raise Exception("No name supplied")
        configfile = os.path.join(args.dir, args.domain, "openssl.conf") if args.dir else None
        conf = certificate.Config(configfile=configfile, domain=args.domain, commonName=args.name, force=args.force)
        certificate.new_domain(conf)
    elif args.cmd == 'remove':
        if not args.domain:
            raise Exception("No domain supplied")
        domaindir = os.path.join(args.dir, args.domain) if args.dir else None
        certificate.remove_domain(args.domain, domaindir)
    elif args.cmd == 'export':
        if not args.domain:
            raise Exception("No domain supplied")
        if not args.path:
            raise Exception("No path supplied")
        configfile = os.path.join(args.dir, args.domain, "openssl.conf") if args.dir else None
        conf = certificate.Config(configfile=configfile, domain=args.domain, readonly=True)
        certificate.copy_cert(conf, args.path)
def manage_cs(args):
    """Create, remove or export a code-signing organization certificate.

    ``args.cmd`` selects the operation:
      create -- new signing domain for ``args.org`` (requires ``org`` and
                ``name``); the new certificate is also copied into the
                trust store.
      remove -- delete the domain and its trust-store entry (requires ``org``).
      export -- copy the certificate to ``args.path`` (requires ``org`` and ``path``).

    Raises:
        Exception: when a required argument for the selected command is missing.
    """
    if args.cmd == 'create':
        # Previously a missing org/name made 'create' a silent no-op (and the
        # inner org check was unreachable); fail loudly instead.
        if not args.org:
            raise Exception("No organization supplied")
        if not args.name:
            raise Exception("No name supplied")
        configfile = os.path.join(args.dir, args.org, "openssl.conf") if args.dir else None
        conf = certificate.Config(configfile=configfile, domain=args.org, commonName=args.name, force=args.force)
        certificate.new_domain(conf)
        # Add certificate to truststore
        if args.dir:
            truststore_path = os.path.join(args.dir, "trustStore")
        else:
            homefolder = os.getenv("HOME")
            truststore_path = os.path.join(homefolder, ".calvin", "security", "trustStore")
        if not os.path.isdir(truststore_path):
            # 0o700 (owner-only); portable spelling for Python 2.6+ and 3.
            os.makedirs(truststore_path, 0o700)
        certificate.copy_cert(conf, truststore_path)
    elif args.cmd == 'remove':
        if not args.org:
            raise Exception("No organization supplied")
        orgdir = os.path.join(args.dir, args.org) if args.dir else None
        # Remove certificate from truststore while the config still exists.
        configfile = os.path.join(orgdir, "openssl.conf") if args.dir else None
        conf = certificate.Config(configfile=configfile, domain=args.org, readonly=True)
        cert_file = conf.configuration["CA_default"]["certificate"]
        if args.dir:
            truststore_path = os.path.join(args.dir, "trustStore")
        else:
            homefolder = os.getenv("HOME")
            truststore_path = os.path.join(homefolder, ".calvin", "security", "trustStore")
        try:
            os.remove(os.path.join(truststore_path, certificate.cert_hash(cert_file) + ".0"))
        except OSError:
            pass  # The certificate is not in the truststore
        certificate.remove_domain(args.org, orgdir)
    elif args.cmd == 'export':
        if not args.org:
            raise Exception("No organization supplied")
        if not args.path:
            raise Exception("No path supplied")
        configfile = os.path.join(args.dir, args.org, "openssl.conf") if args.dir else None
        conf = certificate.Config(configfile=configfile, domain=args.org, readonly=True)
        certificate.copy_cert(conf, args.path)
def main():
    """CLI entry point: dispatch to the sub-command handler chosen by argparse.

    Any exception raised by a handler is reported on stdout instead of
    producing a traceback.
    """
    args = parse_args()
    try:
        args.func(args)
    except Exception as e:
        # Parenthesized print works on both Python 2 and Python 3.
        print("Error {}".format(e))
# Script entry point; main() returns None on success, which sys.exit()
# treats as exit status 0.
if __name__ == '__main__':
    sys.exit(main())
| |
import copy
import importlib
import json
import sys
try:
from unittest import mock
except ImportError:
import mock
try:
import fastapi
FASTAPI_INSTALLED = True
ALLOWED_FASTAPI_VERSION = fastapi.__version__ >= '0.41.0'
except ImportError:
FASTAPI_INSTALLED = False
ALLOWED_FASTAPI_VERSION = False
import unittest2
import rollbar
from rollbar.lib._async import AsyncMock
from rollbar.test import BaseTest
ALLOWED_PYTHON_VERSION = sys.version_info >= (3, 6)
@unittest2.skipUnless(
    FASTAPI_INSTALLED and ALLOWED_PYTHON_VERSION, 'FastAPI requires Python3.6+'
)
class LoggingRouteUnsupportedFastAPIVersionTest(BaseTest):
    """add_to() must refuse to install the route handler on FastAPI < 0.41.0."""

    def test_should_disable_loading_route_handler_if_fastapi_is_too_old(self):
        import logging
        import fastapi
        from fastapi import FastAPI
        from fastapi.routing import APIRoute
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi.utils import FastAPIVersionError

        logging.disable(logging.ERROR)  # silent logger for tests
        fastapi_version = fastapi.__version__
        app = FastAPI()
        old_route_class = app.router.route_class
        self.assertEqual(old_route_class, APIRoute)

        # Restore the patched global state even if an assertion fails, so a
        # failure here cannot leak a fake fastapi.__version__ (or a disabled
        # logger) into later tests.
        try:
            for too_old_version in ('0', '0.30.3', '0.40.10'):
                fastapi.__version__ = too_old_version
                with self.assertRaises(FastAPIVersionError):
                    rollbar_add_to(app)
            self.assertEqual(app.router.route_class, old_route_class)
        finally:
            logging.disable(logging.NOTSET)  # make sure logger is re-enabled
            fastapi.__version__ = fastapi_version
@unittest2.skipUnless(
    FASTAPI_INSTALLED and ALLOWED_PYTHON_VERSION, 'FastAPI requires Python3.6+'
)
@unittest2.skipUnless(ALLOWED_FASTAPI_VERSION, 'FastAPI v0.41.0+ is required')
class LoggingRouteTest(BaseTest):
    """Behavioral tests for Rollbar's FastAPI route handler.

    Covers exception reporting through RollbarLoggingRoute, the contents of
    the reported payload, sync/async handler selection, and the rules for
    when add_to() may (or must refuse to) install the route class on an app
    or router.
    """

    # Pristine settings captured at import time; each test starts from a copy.
    default_settings = copy.deepcopy(rollbar.SETTINGS)

    def setUp(self):
        # Reload rollbar so module-level state left by earlier tests is discarded.
        importlib.reload(rollbar)
        rollbar.SETTINGS = copy.deepcopy(self.default_settings)
        rollbar.SETTINGS['handler'] = 'async'

    @mock.patch('rollbar.report_exc_info')
    def test_should_catch_and_report_errors(self, mock_report):
        """An exception raised in an async endpoint is reported with exc_info."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        async def read_root():
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.get('/')

        mock_report.assert_called_once()
        args, kwargs = mock_report.call_args
        self.assertEqual(kwargs, {})
        exc_type, exc_value, exc_tb = args[0]
        self.assertEqual(exc_type, ZeroDivisionError)
        self.assertIsInstance(exc_value, ZeroDivisionError)

    @mock.patch('rollbar.report_exc_info')
    def test_should_report_with_request_data(self, mock_report):
        """The current Request object is passed along with the exception."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi import Request
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.requests import Request
            from starlette.testclient import TestClient

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        def read_root():
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.get('/')

        mock_report.assert_called_once()
        request = mock_report.call_args[0][1]
        self.assertIsInstance(request, Request)

    @mock.patch('rollbar._check_config', return_value=True)
    @mock.patch('rollbar._serialize_frame_data')
    @mock.patch('rollbar.send_payload')
    def test_should_send_payload_with_request_data(self, mock_send_payload, *mocks):
        """The payload carries method, client IP, URL, path and query params."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/{path}')
        def read_root(path):
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.get('/test?param1=value1&param2=value2')

        mock_send_payload.assert_called_once()
        payload = mock_send_payload.call_args[0][0]
        payload_request = payload['data']['request']
        self.assertEqual(payload_request['method'], 'GET')
        self.assertEqual(payload_request['user_ip'], 'testclient')
        self.assertEqual(
            payload_request['url'],
            'http://testserver/test?param1=value1&param2=value2',
        )
        self.assertDictEqual(payload_request['params'], {'path': 'test'})
        self.assertDictEqual(
            payload_request['GET'], {'param1': 'value1', 'param2': 'value2'}
        )
        self.assertDictEqual(
            payload_request['headers'],
            {
                'accept': '*/*',
                'accept-encoding': 'gzip, deflate',
                'connection': 'keep-alive',
                'host': 'testserver',
                'user-agent': 'testclient',
            },
        )

    @mock.patch('rollbar._check_config', return_value=True)
    @mock.patch('rollbar._serialize_frame_data')
    @mock.patch('rollbar.send_payload')
    def test_should_send_payload_with_request_body(self, mock_send_payload, *mocks):
        """With include_request_body enabled, the JSON body is reported."""
        from fastapi import Body, FastAPI
        from pydantic import BaseModel
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        rollbar.SETTINGS['include_request_body'] = True
        expected_body = {'param1': 'value1', 'param2': 'value2'}

        app = FastAPI()
        rollbar_add_to(app)

        class TestBody(BaseModel):
            param1: str
            param2: str

        @app.post('/')
        def read_root(body: TestBody = Body(...)):
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.post('/', json=expected_body)

        mock_send_payload.assert_called_once()
        payload = mock_send_payload.call_args[0][0]
        payload_request = payload['data']['request']
        self.assertEqual(payload_request['method'], 'POST')
        self.assertEqual(payload_request['user_ip'], 'testclient')
        self.assertEqual(payload_request['url'], 'http://testserver/')
        self.assertEqual(payload_request['body'], json.dumps(expected_body))
        self.assertDictEqual(
            payload_request['headers'],
            {
                'accept': '*/*',
                'accept-encoding': 'gzip, deflate',
                'connection': 'keep-alive',
                'content-length': str(len(json.dumps(expected_body))),
                'content-type': 'application/json',
                'host': 'testserver',
                'user-agent': 'testclient',
            },
        )

    @mock.patch('rollbar._check_config', return_value=True)
    @mock.patch('rollbar._serialize_frame_data')
    @mock.patch('rollbar.send_payload')
    def test_should_send_payload_with_form_data(self, mock_send_payload, *mocks):
        """URL-encoded form fields are reported under POST."""
        from fastapi import FastAPI, Form
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        expected_form = {'param1': 'value1', 'param2': 'value2'}
        expected_body = b'param1=value1&param2=value2'

        app = FastAPI()
        rollbar_add_to(app)

        @app.post('/')
        def read_root(param1: str = Form(...), param2: str = Form(...)):
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.post(
                '/',
                data=expected_body,
                headers={'Content-Type': 'application/x-www-form-urlencoded'},
            )

        mock_send_payload.assert_called_once()
        payload = mock_send_payload.call_args[0][0]
        payload_request = payload['data']['request']
        self.assertEqual(payload_request['method'], 'POST')
        self.assertEqual(payload_request['user_ip'], 'testclient')
        self.assertEqual(payload_request['url'], 'http://testserver/')
        self.assertDictEqual(payload_request['POST'], expected_form)
        self.assertDictEqual(
            payload_request['headers'],
            {
                'accept': '*/*',
                'accept-encoding': 'gzip, deflate',
                'connection': 'keep-alive',
                'content-length': str(len(expected_body)),
                'content-type': 'application/x-www-form-urlencoded',
                'host': 'testserver',
                'user-agent': 'testclient',
            },
        )

    @mock.patch('rollbar._check_config', return_value=True)
    @mock.patch('rollbar.send_payload')
    def test_should_add_framework_version_to_payload(self, mock_send_payload, *mocks):
        """add_to() installs a hook that stamps 'fastapi' and its version."""
        import fastapi
        from fastapi import FastAPI
        import rollbar
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to

        self.assertIsNone(rollbar.BASE_DATA_HOOK)

        app = FastAPI()
        rollbar_add_to(app)
        rollbar.report_exc_info()

        mock_send_payload.assert_called_once()
        payload = mock_send_payload.call_args[0][0]
        self.assertIn('fastapi', payload['data']['framework'])
        self.assertIn(fastapi.__version__, payload['data']['framework'])

    @mock.patch('rollbar.lib._async.report_exc_info', new_callable=AsyncMock)
    @mock.patch('rollbar.report_exc_info')
    def test_should_use_async_report_exc_info_if_default_handler(
        self, sync_report_exc_info, async_report_exc_info
    ):
        """Async endpoints report via the async path under the default handler."""
        from fastapi import FastAPI
        import rollbar
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        rollbar.SETTINGS['handler'] = 'default'

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        async def root():
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.get('/')

        async_report_exc_info.assert_called_once()
        sync_report_exc_info.assert_not_called()

    @mock.patch('rollbar.lib._async.report_exc_info', new_callable=AsyncMock)
    @mock.patch('rollbar.report_exc_info')
    def test_should_use_async_report_exc_info_if_any_async_handler(
        self, sync_report_exc_info, async_report_exc_info
    ):
        """Any async-capable handler (e.g. httpx) uses the async report path."""
        from fastapi import FastAPI
        import rollbar
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        rollbar.SETTINGS['handler'] = 'httpx'

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        async def root():
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.get('/')

        async_report_exc_info.assert_called_once()
        sync_report_exc_info.assert_not_called()

    @mock.patch('logging.Logger.warning')
    @mock.patch('rollbar.lib._async.report_exc_info', new_callable=AsyncMock)
    @mock.patch('rollbar.report_exc_info')
    def test_should_use_sync_report_exc_info_if_non_async_handlers(
        self, sync_report_exc_info, async_report_exc_info, mock_log
    ):
        """Non-async handlers fall back to the sync path, with a warning."""
        from fastapi import FastAPI
        import rollbar
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        rollbar.SETTINGS['handler'] = 'threading'

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        async def root():
            1 / 0

        client = TestClient(app)
        with self.assertRaises(ZeroDivisionError):
            client.get('/')

        sync_report_exc_info.assert_called_once()
        async_report_exc_info.assert_not_called()
        mock_log.assert_called_once_with(
            'Failed to report asynchronously. Trying to report synchronously.'
        )

    def test_should_enable_loading_route_handler_if_fastapi_version_is_sufficient(self):
        """On a supported FastAPI version, add_to() swaps in RollbarLoggingRoute."""
        from fastapi import FastAPI
        from fastapi.routing import APIRoute
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi.routing import RollbarLoggingRoute

        self.assertTrue(ALLOWED_FASTAPI_VERSION)

        app = FastAPI()
        old_route_class = app.router.route_class
        self.assertEqual(old_route_class, APIRoute)

        new_route_class = rollbar_add_to(app)

        self.assertNotEqual(new_route_class, old_route_class)
        self.assertEqual(app.router.route_class, new_route_class)
        self.assertEqual(app.router.route_class, RollbarLoggingRoute)

    def test_should_enable_loading_route_handler_before_adding_routes_to_app(self):
        """Installing on a bare app works; routes added later use the new class."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi.routing import RollbarLoggingRoute

        app = FastAPI()
        old_route_class = app.router.route_class
        # A bare FastAPI app ships with 4 built-in routes (docs endpoints).
        self.assertEqual(len(app.routes), 4)

        new_route_class = rollbar_add_to(app)

        self.assertNotEqual(new_route_class, old_route_class)
        self.assertEqual(app.router.route_class, new_route_class)
        self.assertEqual(len(app.routes), 4)

        @app.get('/')
        async def read_root():
            ...

        self.assertEqual(app.router.route_class, new_route_class)
        self.assertEqual(app.router.route_class, RollbarLoggingRoute)
        self.assertEqual(len(app.routes), 5)

    @mock.patch('logging.Logger.error')
    def test_should_disable_loading_route_handler_after_adding_routes_to_app(
        self, mock_log
    ):
        """Installing after user routes exist is refused and logged."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to

        app = FastAPI()
        old_route_class = app.router.route_class
        self.assertEqual(len(app.routes), 4)

        @app.get('/')
        async def read_root():
            ...

        self.assertEqual(len(app.routes), 5)

        new_route_class = rollbar_add_to(app)

        self.assertEqual(len(app.routes), 5)
        self.assertIsNone(new_route_class)
        self.assertEqual(app.router.route_class, old_route_class)
        mock_log.assert_called_once_with(
            'RollbarLoggingRoute must to be added to a bare router'
            ' (before adding routes). See docs for more details.'
        )

    def test_should_enable_loading_route_handler_before_adding_routes_to_router(self):
        """Installing on a bare APIRouter leaves the app's own router untouched."""
        from fastapi import APIRouter, FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi.routing import RollbarLoggingRoute

        app = FastAPI()
        router = APIRouter()
        old_app_route_class = app.router.route_class
        old_router_route_class = router.route_class
        self.assertEqual(len(app.routes), 4)
        self.assertEqual(len(router.routes), 0)

        new_route_class = rollbar_add_to(router)

        self.assertNotEqual(new_route_class, old_router_route_class)
        self.assertEqual(router.route_class, new_route_class)
        self.assertEqual(router.route_class, RollbarLoggingRoute)
        self.assertEqual(app.router.route_class, old_app_route_class)
        self.assertEqual(len(app.routes), 4)
        self.assertEqual(len(router.routes), 0)

        @router.get('/')
        async def read_root():
            ...

        app.include_router(router)

        self.assertEqual(router.route_class, new_route_class)
        self.assertEqual(len(app.routes), 5)

    @mock.patch('logging.Logger.error')
    def test_should_disable_loading_route_handler_after_adding_routes_to_router(
        self, mock_log
    ):
        """Installing on a router that already has routes is refused and logged."""
        from fastapi import APIRouter, FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to

        app = FastAPI()
        router = APIRouter()
        old_app_route_class = app.router.route_class
        old_router_route_class = router.route_class
        self.assertEqual(len(app.routes), 4)
        self.assertEqual(len(router.routes), 0)

        @router.get('/')
        async def read_root():
            ...

        app.include_router(router)
        self.assertEqual(len(app.routes), 5)
        self.assertEqual(len(router.routes), 1)

        new_route_class = rollbar_add_to(app)

        self.assertEqual(len(app.routes), 5)
        self.assertEqual(len(router.routes), 1)
        self.assertIsNone(new_route_class)
        self.assertEqual(app.router.route_class, old_app_route_class)
        self.assertEqual(router.route_class, old_router_route_class)
        mock_log.assert_called_once_with(
            'RollbarLoggingRoute must to be added to a bare router'
            ' (before adding routes). See docs for more details.'
        )

    def test_should_enable_loading_route_handler_for_multiple_routers(self):
        """add_to() affects only the routers it was applied to."""
        from fastapi import APIRouter, FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi.routing import RollbarLoggingRoute

        app = FastAPI()
        router1 = APIRouter()
        router2 = APIRouter()
        router3 = APIRouter()
        old_app_route_class = app.router.route_class
        old_router1_route_class = router1.route_class
        old_router2_route_class = router2.route_class
        old_router3_route_class = router3.route_class
        self.assertEqual(len(app.routes), 4)
        self.assertEqual(len(router1.routes), 0)
        self.assertEqual(len(router2.routes), 0)
        self.assertEqual(len(router3.routes), 0)

        # Install on router1 and router2 only; router3 stays untouched.
        new_router1_route_class = rollbar_add_to(router1)
        new_router2_route_class = rollbar_add_to(router2)

        self.assertNotEqual(new_router1_route_class, old_router1_route_class)
        self.assertNotEqual(new_router2_route_class, old_router2_route_class)
        self.assertEqual(router1.route_class, RollbarLoggingRoute)
        self.assertEqual(router2.route_class, RollbarLoggingRoute)
        self.assertEqual(router1.route_class, new_router1_route_class)
        self.assertEqual(router2.route_class, new_router2_route_class)
        self.assertEqual(router3.route_class, old_router3_route_class)
        self.assertEqual(app.router.route_class, old_app_route_class)
        self.assertEqual(len(app.routes), 4)
        self.assertEqual(len(router1.routes), 0)
        self.assertEqual(len(router2.routes), 0)
        self.assertEqual(len(router3.routes), 0)

        @router1.get('/')
        async def read1():
            ...

        @router2.get('/')
        async def read2():
            ...

        @router3.get('/')
        async def read3():
            ...

        app.include_router(router1)
        app.include_router(router2)
        app.include_router(router3)

        self.assertEqual(router1.route_class, new_router1_route_class)
        self.assertEqual(router2.route_class, new_router2_route_class)
        self.assertEqual(router3.route_class, old_router3_route_class)
        self.assertEqual(len(app.routes), 7)

    def test_should_enable_loading_route_handler_for_fastapi_app(self):
        """add_to() accepts a FastAPI application instance."""
        from fastapi import FastAPI
        from fastapi.routing import APIRoute
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi.routing import RollbarLoggingRoute

        app = FastAPI()
        old_route_class = app.router.route_class
        self.assertEqual(old_route_class, APIRoute)

        new_route_class = rollbar_add_to(app)

        self.assertNotEqual(new_route_class, old_route_class)
        self.assertEqual(app.router.route_class, RollbarLoggingRoute)
        self.assertEqual(app.router.route_class, new_route_class)

    def test_should_enable_loading_route_handler_for_fastapi_router(self):
        """add_to() accepts an APIRouter instance."""
        from fastapi import APIRouter
        from fastapi.routing import APIRoute
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi.routing import RollbarLoggingRoute

        router = APIRouter()
        old_route_class = router.route_class
        self.assertEqual(old_route_class, APIRoute)

        new_route_class = rollbar_add_to(router)

        self.assertNotEqual(new_route_class, old_route_class)
        self.assertEqual(router.route_class, RollbarLoggingRoute)
        self.assertEqual(router.route_class, new_route_class)

    @mock.patch('logging.Logger.error')
    def test_should_disable_loading_route_handler_for_unknown_app(self, mock_log):
        """A structurally similar but foreign app object is rejected."""
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to

        # Dummy app: has routes and a router but is not FastAPI/APIRouter,
        # so add_to() must refuse it.
        class UnknownRouter:
            route_class = None

        class UnknownApp:
            routes = []
            router = UnknownRouter()

        app = UnknownApp()
        old_route_class = app.router.route_class

        new_route_class = rollbar_add_to(app)

        self.assertIsNone(new_route_class)
        self.assertEqual(app.router.route_class, old_route_class)
        mock_log.assert_called_once_with(
            'Error adding RollbarLoggingRoute to application.'
        )

    @mock.patch('logging.Logger.error')
    def test_should_disable_loading_route_handler_for_unknown_router(self, mock_log):
        """A foreign router object is rejected."""
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to

        class UnknownRouter:
            routes = []
            route_class = None

        router = UnknownRouter()
        old_route_class = router.route_class

        new_route_class = rollbar_add_to(router)

        self.assertIsNone(new_route_class)
        self.assertEqual(router.route_class, old_route_class)
        mock_log.assert_called_once_with(
            'Error adding RollbarLoggingRoute to application.'
        )

    def test_should_warn_if_middleware_in_use(self):
        """Installing the route handler alongside a Rollbar middleware warns."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        from rollbar.contrib.fastapi import ReporterMiddleware as FastAPIMiddleware
        from rollbar.contrib.starlette import ReporterMiddleware as StarletteMiddleware
        from rollbar.contrib.asgi import ReporterMiddleware as ASGIMiddleware

        for middleware in (FastAPIMiddleware, StarletteMiddleware, ASGIMiddleware):
            with mock.patch('logging.Logger.warning') as mock_log:
                app = FastAPI()
                app.add_middleware(middleware)

                rollbar_add_to(app)

                mock_log.assert_called_once_with(
                    f'Detected middleware installed {[middleware]}'
                    ' while loading Rollbar route handler.'
                    ' This can cause in duplicate occurrences.'
                )

    @unittest2.skipUnless(
        sys.version_info >= (3, 6), 'Global request access requires Python 3.6+'
    )
    @mock.patch('rollbar.contrib.fastapi.routing.store_current_request')
    def test_should_store_current_request(self, store_current_request):
        """The ASGI scope of each handled request is stored for later access."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.testclient import TestClient

        # Subset of the ASGI scope the TestClient is expected to produce.
        expected_scope = {
            'client': ['testclient', 50000],
            'headers': [
                (b'host', b'testserver'),
                (b'user-agent', b'testclient'),
                (b'accept-encoding', b'gzip, deflate'),
                (b'accept', b'*/*'),
                (b'connection', b'keep-alive'),
            ],
            'http_version': '1.1',
            'method': 'GET',
            'path': '/',
            'query_string': b'',
            'root_path': '',
            'scheme': 'http',
            'server': ['testserver', 80],
            'type': 'http',
        }

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        async def read_root():
            ...

        client = TestClient(app)
        client.get('/')

        store_current_request.assert_called_once()
        scope = store_current_request.call_args[0][0]
        self.assertDictContainsSubset(expected_scope, scope)

    @unittest2.skipUnless(
        sys.version_info >= (3, 6), 'Global request access is supported in Python 3.6+'
    )
    def test_should_return_current_request(self):
        """get_current_request() returns the in-flight Request inside a handler."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi import get_current_request
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi import Request
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.requests import Request
            from starlette.testclient import TestClient

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        async def read_root(original_request: Request):
            # Assertions run inside the endpoint, while the request is current.
            request = get_current_request()

            self.assertEqual(request, original_request)

        client = TestClient(app)
        client.get('/')

    @mock.patch('rollbar.contrib.starlette.requests.ContextVar', None)
    @mock.patch('logging.Logger.error')
    def test_should_not_return_current_request_for_older_python(self, mock_log):
        """Without ContextVar support, get_current_request() returns None and logs."""
        from fastapi import FastAPI
        from rollbar.contrib.fastapi import get_current_request
        from rollbar.contrib.fastapi.routing import add_to as rollbar_add_to
        try:
            from fastapi import Request
            from fastapi.testclient import TestClient
        except ImportError:  # Added in FastAPI v0.51.0+
            from starlette.requests import Request
            from starlette.testclient import TestClient

        app = FastAPI()
        rollbar_add_to(app)

        @app.get('/')
        async def read_root(original_request: Request):
            request = get_current_request()

            self.assertIsNone(request)
            self.assertNotEqual(request, original_request)
            mock_log.assert_called_once_with(
                'Python 3.7+ (or aiocontextvars package)'
                ' is required to receive current request.'
            )

        client = TestClient(app)
        client.get('/')

    def test_should_support_type_hints(self):
        """add_to() advertises its FastAPI/APIRouter union signature."""
        from typing import Optional, Type, Union
        from fastapi import APIRouter, FastAPI
        from fastapi.routing import APIRoute
        import rollbar.contrib.fastapi.routing

        self.assertDictEqual(
            rollbar.contrib.fastapi.routing.add_to.__annotations__,
            {
                'app_or_router': Union[FastAPI, APIRouter],
                'return': Optional[Type[APIRoute]],
            },
        )
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
# Alias for JSON-serializable request payloads (generated SDKs use Any here).
JSONType = Any
# Optional callback applied to (raw pipeline response, deserialized model,
# response headers); when supplied its return value replaces the model.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

# Shared serializer for path/query/header parameters; client-side validation
# is disabled because the service performs authoritative validation.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
    sql_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a SQL pool's extended blob auditing
    policy (always the 'default' policy, API version 2021-06-01)."""
    api_version = "2021-06-01"
    blob_auditing_policy_name = "default"
    accept = "application/json"

    # Resolve the URL template with the serialized (validated) path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/extendedAuditingSettings/{blobAuditingPolicyName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        sqlPoolName=_SERIALIZER.url("sql_pool_name", sql_pool_name, 'str'),
        blobAuditingPolicyName=_SERIALIZER.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
    )

    # Query and header parameters (caller-supplied entries are preserved).
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_create_or_update_request(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
    sql_pool_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates a SQL pool's
    extended blob auditing policy (always the 'default' policy)."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-06-01"
    blob_auditing_policy_name = "default"
    accept = "application/json"

    # Resolve the URL template with the serialized (validated) path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/extendedAuditingSettings/{blobAuditingPolicyName}')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        sqlPoolName=_SERIALIZER.url("sql_pool_name", sql_pool_name, 'str'),
        blobAuditingPolicyName=_SERIALIZER.url("blob_auditing_policy_name", blob_auditing_policy_name, 'str'),
    )

    # Query and header parameters; Content-Type only when a body type is known.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_list_by_sql_pool_request(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
    sql_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists a SQL pool's extended blob
    auditing policies (API version 2021-06-01)."""
    api_version = "2021-06-01"
    accept = "application/json"

    # Resolve the URL template with the serialized (validated) path arguments.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/extendedAuditingSettings')
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, 'str'),
        sqlPoolName=_SERIALIZER.url("sql_pool_name", sql_pool_name, 'str'),
    )

    # Query and header parameters (caller-supplied entries are preserved).
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class ExtendedSqlPoolBlobAuditingPoliciesOperations(object):
    """ExtendedSqlPoolBlobAuditingPoliciesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.synapse.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send requests and resolve relative URLs.
        self._client = client
        # Serializer/deserializer pair for request bodies and response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client configuration (supplies e.g. subscription_id).
        self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
**kwargs: Any
) -> "_models.ExtendedSqlPoolBlobAuditingPolicy":
"""Gets an extended Sql pool's blob auditing policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExtendedSqlPoolBlobAuditingPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.ExtendedSqlPoolBlobAuditingPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtendedSqlPoolBlobAuditingPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExtendedSqlPoolBlobAuditingPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/extendedAuditingSettings/{blobAuditingPolicyName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
parameters: "_models.ExtendedSqlPoolBlobAuditingPolicy",
**kwargs: Any
) -> "_models.ExtendedSqlPoolBlobAuditingPolicy":
"""Creates or updates an extended Sql pool's blob auditing policy.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param parameters: The extended Sql pool blob auditing policy.
:type parameters: ~azure.mgmt.synapse.models.ExtendedSqlPoolBlobAuditingPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExtendedSqlPoolBlobAuditingPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.ExtendedSqlPoolBlobAuditingPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtendedSqlPoolBlobAuditingPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ExtendedSqlPoolBlobAuditingPolicy')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExtendedSqlPoolBlobAuditingPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExtendedSqlPoolBlobAuditingPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/extendedAuditingSettings/{blobAuditingPolicyName}'} # type: ignore
@distributed_trace
def list_by_sql_pool(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
**kwargs: Any
) -> Iterable["_models.ExtendedSqlPoolBlobAuditingPolicyListResult"]:
"""Lists extended auditing settings of a Sql pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExtendedSqlPoolBlobAuditingPolicyListResult or the
result of cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.synapse.models.ExtendedSqlPoolBlobAuditingPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExtendedSqlPoolBlobAuditingPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_sql_pool_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
template_url=self.list_by_sql_pool.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_sql_pool_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("ExtendedSqlPoolBlobAuditingPolicyListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_sql_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/extendedAuditingSettings'} # type: ignore
| |
# Copyright (c) 2010-2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from contextlib import contextmanager
import mock
import os
import random
import tempfile
import unittest
import shutil
import copy
from collections import defaultdict, Counter
from swift.common.exceptions import RingBuilderError
from swift.common.ring import RingBuilder, Ring
from swift.common.ring.composite_builder import (
compose_rings, CompositeRingBuilder, CooperativeRingBuilder)
def make_device_iter(base_port=6000):
    """Yield an endless stream of distinct sample device dicts.

    Device ``x`` gets ip/replication_ip ``'10.0.0.<x>'`` and
    port/replication_port ``base_port + x``, so no two yielded devices share
    an (ip, port, device) triple. Region and zone default to 0; tests
    overwrite 'region' as needed (see pop_region_device).

    :param base_port: first port number to assign; kept as a parameter so
        callers can build non-overlapping device sets (defaults to the
        historical value 6000)
    """
    x = 0
    while True:
        yield {'region': 0,  # Note that region may be replaced on the tests
               'zone': 0,
               'ip': '10.0.0.%s' % x,
               'replication_ip': '10.0.0.%s' % x,
               'port': base_port + x,
               'replication_port': base_port + x,
               'device': 'sda',
               'weight': 100.0, }
        x += 1
class BaseTestCompositeBuilder(unittest.TestCase):
    """Common fixtures and assertion helpers for composite ring tests."""

    def setUp(self):
        # Each test gets its own scratch dir, an endless stream of unique
        # sample devices, and a target path for the composed ring file.
        self.tmpdir = tempfile.mkdtemp()
        self.device_iter = make_device_iter()
        self.output_ring = os.path.join(self.tmpdir, 'composite.ring.gz')

    def pop_region_device(self, region):
        """Return the next unique sample device dict, placed in *region*."""
        dev = next(self.device_iter)
        dev.update({'region': region})
        return dev

    def tearDown(self):
        # Best-effort cleanup: ignore_errors=True plus the OSError guard
        # means a missing/busy tmpdir never fails the test.
        try:
            shutil.rmtree(self.tmpdir, True)
        except OSError:
            pass

    def save_builder_with_no_id(self, builder, fname):
        """Save *builder* to *fname* with 'id' stripped from its state.

        Simulates a builder file written before ids were assigned.
        """
        orig_to_dict = builder.to_dict

        def fake_to_dict():
            res = orig_to_dict()
            res.pop('id')
            return res

        with mock.patch.object(builder, 'to_dict', fake_to_dict):
            builder.save(fname)

    def save_builders(self, builders, missing_ids=None, prefix='builder'):
        """Save builders to '<prefix>_<i>.builder' files in the tmpdir.

        :param missing_ids: indexes of builders to save without an 'id' key
        :returns: the list of file paths written, in builder order
        """
        missing_ids = missing_ids or []
        builder_files = []
        for i, builder in enumerate(builders):
            fname = os.path.join(self.tmpdir, '%s_%s.builder' % (prefix, i))
            if i in missing_ids:
                self.save_builder_with_no_id(builder, fname)
            else:
                builder.save(fname)
            builder_files.append(fname)
        return builder_files

    def create_sample_ringbuilders(self, num_builders=2, rebalance=True):
        """
        Create sample rings with four devices

        Each builder occupies its own region and is saved to disk (which
        assigns it an id). One added-then-removed device gives the ring some
        history; one zero-weight device gets no partition assignments.

        :returns: a list of ring builder instances
        """
        builders = []
        for region in range(num_builders):
            fname = os.path.join(self.tmpdir, 'builder_%s.builder' % region)
            builder = RingBuilder(6, 3, 0)
            for _ in range(5):
                dev = self.pop_region_device(region)
                builder.add_dev(dev)
            # remove last dev to simulate a ring with some history
            builder.remove_dev(dev['id'])
            # add a dev that won't be assigned any parts
            new_dev = self.pop_region_device(region)
            new_dev['weight'] = 0
            builder.add_dev(new_dev)
            if rebalance:
                builder.rebalance()
            builder.save(fname)
            self.assertTrue(os.path.exists(fname))
            builders.append(builder)
        return builders

    def add_dev(self, builder, weight=None, region=None):
        """Add one new device to *builder*, defaulting to its first region."""
        if region is None:
            dev = next(builder._iter_devs())
            region = dev['region']
        new_dev = self.pop_region_device(region)
        if weight is not None:
            new_dev['weight'] = weight
        builder.add_dev(new_dev)

    def add_dev_and_rebalance(self, builder, weight=None):
        """Add one device and rebalance, modifying the builder's version."""
        self.add_dev(builder, weight)
        builder.rebalance()

    def assertDevices(self, composite_ring, builders):
        """
        Assert the composite ring's device table and partition assignments
        are consistent with the given component builders, in order.

        :param composite_ring: a Ring instance
        :param builders: a list of RingBuilder instances for assertion
        """
        # assert all component devices are in composite device table
        builder_devs = []
        for builder in builders:
            builder_devs.extend([
                (dev['ip'], dev['port'], dev['device'])
                for dev in builder._iter_devs()])

        got_devices = [
            (dev['ip'], dev['port'], dev['device'])
            for dev in composite_ring.devs if dev]
        self.assertEqual(sorted(builder_devs), sorted(got_devices),
                         "composite_ring mismatched with part of the rings")

        # assert composite device ids correctly index into the dev list
        dev_ids = []
        for i, dev in enumerate(composite_ring.devs):
            if dev:
                self.assertEqual(i, dev['id'])
                dev_ids.append(dev['id'])
        self.assertEqual(len(builder_devs), len(dev_ids))

        def uniqueness(dev):
            # identity key for a device across builders and the composite
            return (dev['ip'], dev['port'], dev['device'])

        # assert part assignment is ordered by ring order
        part_count = composite_ring.partition_count
        for part in range(part_count):
            primaries = [uniqueness(primary) for primary in
                         composite_ring.get_part_nodes(part)]
            offset = 0
            for builder in builders:
                # each component's replicas occupy a contiguous slice of the
                # composite's primaries, in component order
                sub_primaries = [uniqueness(primary) for primary in
                                 builder.get_part_devices(part)]
                self.assertEqual(
                    primaries[offset:offset + builder.replicas],
                    sub_primaries,
                    "composite ring is not ordered by ring order, %s, %s"
                    % (primaries, sub_primaries))
                offset += builder.replicas

    def check_composite_ring(self, ring_file, builders):
        """Assert the ring at *ring_file* is the composition of *builders*."""
        got_ring = Ring(ring_file)
        self.assertEqual(got_ring.partition_count, builders[0].parts)
        self.assertEqual(got_ring.replica_count,
                         sum(b.replicas for b in builders))
        self.assertEqual(got_ring._part_shift, builders[0].part_shift)
        self.assertDevices(got_ring, builders)

    def check_composite_meta(self, cb_file, builder_files, version=1):
        """Assert the persisted composite metadata file matches *builder_files*."""
        with open(cb_file) as fd:
            actual = json.load(fd)
        builders = [RingBuilder.load(fname) for fname in builder_files]
        expected_metadata = {
            'saved_path': os.path.abspath(cb_file),
            'serialization_version': 1,
            'version': version,
            'components': [
                {'id': builder.id,
                 'version': builder.version,
                 'replicas': builder.replicas,
                 }
                for builder in builders
            ],
            'component_builder_files':
                dict((builder.id, os.path.abspath(builder_files[i]))
                     for i, builder in enumerate(builders))
        }
        self.assertEqual(expected_metadata, actual)

    def _make_composite_builder(self, builders):
        # helper to compose a ring, save it and sanity check it
        builder_files = self.save_builders(builders)
        cb = CompositeRingBuilder(builder_files)
        cb.compose().save(self.output_ring)
        self.check_composite_ring(self.output_ring, builders)
        return cb, builder_files
class TestCompositeBuilder(BaseTestCompositeBuilder):
    """Tests for swift.common.ring.composite_builder.compose_rings().

    NOTE(review): this class used the Python 2-only ``cm.exception.message``
    attribute (removed in Python 3) while other tests in this file already
    use ``str(cm.exception)``; all assertions now use ``str(cm.exception)``
    for consistency and py3 compatibility.
    """

    def test_compose_rings(self):
        """Composing 2, 3 or 4 valid builders yields a valid composite."""
        def do_test(builder_count):
            builders = self.create_sample_ringbuilders(builder_count)
            rd = compose_rings(builders)
            rd.save(self.output_ring)
            self.check_composite_ring(self.output_ring, builders)

        do_test(2)
        do_test(3)
        do_test(4)

    def test_composite_same_region_in_the_different_rings_error(self):
        """A region may appear in only one component ring."""
        builder_1 = self.create_sample_ringbuilders(1)
        builder_2 = self.create_sample_ringbuilders(1)
        builders = builder_1 + builder_2
        with self.assertRaises(ValueError) as cm:
            compose_rings(builders)
        self.assertIn('Same region found in different rings',
                      str(cm.exception))

    def test_composite_only_one_ring_in_the_args_error(self):
        """At least two component builders are required."""
        builders = self.create_sample_ringbuilders(1)
        with self.assertRaises(ValueError) as cm:
            compose_rings(builders)
        self.assertIn(
            'Two or more component builders are required.',
            str(cm.exception))

    def test_composite_same_device_in_the_different_rings_error(self):
        """The same ip/port/device triple may not appear in two components."""
        builders = self.create_sample_ringbuilders(2)
        same_device = copy.deepcopy(builders[0].devs[0])

        # create one more ring which duplicates a device in the first ring
        builder = RingBuilder(6, 3, 1)
        _, fname = tempfile.mkstemp(dir=self.tmpdir)
        # add info to feed to add_dev
        same_device.update({'region': 2, 'weight': 100})
        builder.add_dev(same_device)

        # add rest of the devices, which are unique
        for _ in range(3):
            dev = self.pop_region_device(2)
            builder.add_dev(dev)

        builder.rebalance()
        builder.save(fname)
        # sanity
        self.assertTrue(os.path.exists(fname))
        builders.append(builder)

        with self.assertRaises(ValueError) as cm:
            compose_rings(builders)
        self.assertIn(
            'Duplicate ip/port/device combination %(ip)s/%(port)s/%(device)s '
            'found in builders at indexes 0 and 2' %
            same_device, str(cm.exception))

    def test_different_part_power_error(self):
        """All components must share the same part power."""
        # create a ring builder
        # (default, part power is 6 with create_sample_ringbuilders)
        builders = self.create_sample_ringbuilders(1)

        # prepare another ring which has different part power
        incorrect_builder = RingBuilder(4, 3, 1)
        _, fname = tempfile.mkstemp(dir=self.tmpdir)
        for _ in range(4):
            dev = self.pop_region_device(1)
            incorrect_builder.add_dev(dev)

        incorrect_builder.rebalance()
        incorrect_builder.save(fname)
        # sanity
        self.assertTrue(os.path.exists(fname))

        # sanity
        correct_builder = builders[0]
        self.assertNotEqual(correct_builder.part_shift,
                            incorrect_builder.part_shift)
        self.assertNotEqual(correct_builder.part_power,
                            incorrect_builder.part_power)

        builders.append(incorrect_builder)
        with self.assertRaises(ValueError) as cm:
            compose_rings(builders)
        self.assertIn("All builders must have same value for 'part_power'",
                      str(cm.exception))

    def test_compose_rings_float_replica_count_builder_error(self):
        """Components with non-integer replica counts are rejected."""
        builders = self.create_sample_ringbuilders(1)

        # prepare another ring which has float replica count
        incorrect_builder = RingBuilder(6, 1.5, 1)
        _, fname = tempfile.mkstemp(dir=self.tmpdir)
        for _ in range(4):
            dev = self.pop_region_device(1)
            incorrect_builder.add_dev(dev)

        incorrect_builder.rebalance()
        incorrect_builder.save(fname)
        # sanity
        self.assertTrue(os.path.exists(fname))
        self.assertEqual(1.5, incorrect_builder.replicas)
        # the first replica has 2 ** 6 partitions
        self.assertEqual(
            2 ** 6, len(incorrect_builder._replica2part2dev[0]))
        # but the second replica has the half of the first partitions
        self.assertEqual(
            2 ** 5, len(incorrect_builder._replica2part2dev[1]))

        builders.append(incorrect_builder)

        with self.assertRaises(ValueError) as cm:
            compose_rings(builders)
        self.assertIn("Problem with builders", str(cm.exception))
        self.assertIn("Non integer replica count", str(cm.exception))

    def test_compose_rings_rebalance_needed(self):
        """Components with pending device changes must be rebalanced first."""
        builders = self.create_sample_ringbuilders(2)

        # add a new device to builder 1 but no rebalance
        dev = self.pop_region_device(1)
        builders[1].add_dev(dev)
        self.assertTrue(builders[1].devs_changed)  # sanity check
        with self.assertRaises(ValueError) as cm:
            compose_rings(builders)
        self.assertIn("Problem with builders", str(cm.exception))
        self.assertIn("Builder needs rebalance", str(cm.exception))
        # after rebalance, that works (sanity)
        builders[1].rebalance()
        compose_rings(builders)

    def test_different_replica_count_works(self):
        """Components may have different (integer) replica counts."""
        # create a ring builder
        # (default, part power is 6 with create_sample_ringbuilders)
        builders = self.create_sample_ringbuilders(1)

        # prepare another ring which has different replica count
        builder = RingBuilder(6, 1, 1)
        _, fname = tempfile.mkstemp(dir=self.tmpdir)
        for _ in range(4):
            dev = self.pop_region_device(1)
            builder.add_dev(dev)

        builder.rebalance()
        builder.save(fname)
        # sanity
        self.assertTrue(os.path.exists(fname))
        builders.append(builder)

        rd = compose_rings(builders)
        rd.save(self.output_ring)
        got_ring = Ring(self.output_ring)
        self.assertEqual(got_ring.partition_count, 2 ** 6)
        self.assertEqual(got_ring.replica_count, 4)  # 3 + 1
        self.assertEqual(got_ring._part_shift, 26)
        self.assertDevices(got_ring, builders)

    def test_ring_swap(self):
        """Component order determines replica order in the composite ring."""
        # sort by builder id for a deterministic order: RingBuilder
        # instances are not directly orderable on Python 3 (py2's arbitrary
        # instance comparison is gone)
        builders = sorted(self.create_sample_ringbuilders(2),
                          key=lambda b: b.id)
        # sanity
        rd = compose_rings(builders)
        rd.save(self.output_ring)
        got_ring = Ring(self.output_ring)
        self.assertEqual(got_ring.partition_count, 2 ** 6)
        self.assertEqual(got_ring.replica_count, 6)
        self.assertEqual(got_ring._part_shift, 26)
        self.assertDevices(got_ring, builders)

        # even if swapped, it works
        reverse_builders = sorted(builders, key=lambda b: b.id, reverse=True)
        self.assertNotEqual(reverse_builders, builders)
        rd = compose_rings(reverse_builders)
        rd.save(self.output_ring)
        got_ring = Ring(self.output_ring)
        self.assertEqual(got_ring.partition_count, 2 ** 6)
        self.assertEqual(got_ring.replica_count, 6)
        self.assertEqual(got_ring._part_shift, 26)
        self.assertDevices(got_ring, reverse_builders)

        # but if the composite rings are different order, the composite ring
        # *will* be different. Note that the CompositeRingBuilder class will
        # check builder order against the existing ring and fail if the order
        # is different (actually checking the metadata). See also
        # test_compose_different_builder_order
        with self.assertRaises(AssertionError) as cm:
            self.assertDevices(got_ring, builders)
        self.assertIn("composite ring is not ordered by ring order",
                      str(cm.exception))
class TestCompositeRingBuilder(BaseTestCompositeBuilder):
    """Tests for the CompositeRingBuilder class.

    NOTE(review): exception-text assertions now use ``str(cm.exception)``
    instead of the Python 2-only ``cm.exception.message`` attribute (removed
    in Python 3), matching the style already used by test_rebalance_errors.
    """

    def test_compose_with_builder_files(self):
        """A reloaded composite builder picks up modified component files."""
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
        builders = self.create_sample_ringbuilders(2)
        cb, _ = self._make_composite_builder(builders)
        cb.save(cb_file)

        for i, b in enumerate(builders):
            self.add_dev_and_rebalance(b)
        self.save_builders(builders)
        cb = CompositeRingBuilder.load(cb_file)
        cb.compose().save(self.output_ring)
        self.check_composite_ring(self.output_ring, builders)

    def test_compose_ok(self):
        """Compose, persist, reload and re-compose a composite builder."""
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
        builders = self.create_sample_ringbuilders(2)
        # make first version of composite ring
        cb, builder_files = self._make_composite_builder(builders)
        # check composite builder persists ok
        cb.save(cb_file)
        self.assertTrue(os.path.exists(cb_file))
        self.check_composite_meta(cb_file, builder_files)
        # and reloads ok
        cb = CompositeRingBuilder.load(cb_file)
        self.assertEqual(1, cb.version)
        # compose detects if no component builder changes, if we ask it to...
        with self.assertRaises(ValueError) as cm:
            cb.compose(require_modified=True)
        self.assertIn('None of the component builders has been modified',
                      str(cm.exception))
        self.assertEqual(1, cb.version)
        # ...but by default will compose again despite no changes to components
        cb.compose(force=True).save(self.output_ring)
        self.check_composite_ring(self.output_ring, builders)
        self.assertEqual(2, cb.version)
        # check composite builder persists ok again
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json2')
        cb.save(cb_file)
        self.assertTrue(os.path.exists(cb_file))
        self.check_composite_meta(cb_file, builder_files, version=2)

    def test_compose_modified_component_builders(self):
        """Composing again with modified components bumps the version."""
        # check it's ok to compose again with same but modified builders
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
        builders = self.create_sample_ringbuilders(2)
        cb, builder_files = self._make_composite_builder(builders)
        ring = Ring(self.output_ring)
        orig_devs = [dev for dev in ring.devs if dev]
        self.assertEqual(10, len(orig_devs))  # sanity check
        self.add_dev_and_rebalance(builders[1])
        builder_files = self.save_builders(builders)
        cb.compose().save(self.output_ring)
        self.check_composite_ring(self.output_ring, builders)
        ring = Ring(self.output_ring)
        modified_devs = [dev for dev in ring.devs if dev]
        self.assertEqual(len(orig_devs) + 1, len(modified_devs))
        # check composite builder persists ok
        cb.save(cb_file)
        self.assertTrue(os.path.exists(cb_file))
        self.check_composite_meta(cb_file, builder_files, version=2)
        # and reloads ok
        cb = CompositeRingBuilder.load(cb_file)
        # and composes ok after reload
        cb.compose(force=True).save(self.output_ring)
        self.check_composite_ring(self.output_ring, builders)
        # check composite builder persists ok again
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json2')
        cb.save(cb_file)
        self.assertTrue(os.path.exists(cb_file))
        self.check_composite_meta(cb_file, builder_files, version=3)

    def test_compose_override_component_builders(self):
        """Builder files passed to compose() override the loaded ones."""
        # check passing different builder files to the compose() method
        # overrides loaded builder files
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
        builders = self.create_sample_ringbuilders(2)
        cb, builder_files = self._make_composite_builder(builders)
        # modify builders and save in different files
        self.add_dev_and_rebalance(builders[1])
        with self.assertRaises(ValueError):
            # sanity check - originals are unchanged
            cb.compose(builder_files, require_modified=True)
        other_files = self.save_builders(builders, prefix='other')
        cb.compose(other_files, require_modified=True).save(self.output_ring)
        self.check_composite_ring(self.output_ring, builders)
        # check composite builder persists ok
        cb.save(cb_file)
        self.assertTrue(os.path.exists(cb_file))
        self.check_composite_meta(cb_file, other_files, version=2)
        # and reloads ok
        cb = CompositeRingBuilder.load(cb_file)
        # and composes ok after reload
        cb.compose(force=True).save(self.output_ring)
        self.check_composite_ring(self.output_ring, builders)
        # check composite builder persists ok again
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json2')
        cb.save(cb_file)
        self.assertTrue(os.path.exists(cb_file))
        self.check_composite_meta(cb_file, other_files, version=3)

    def test_abs_paths_persisted(self):
        """Relative builder paths are persisted as absolute paths."""
        cwd = os.getcwd()
        try:
            os.chdir(self.tmpdir)
            builders = self.create_sample_ringbuilders(2)
            builder_files = self.save_builders(builders)
            rel_builder_files = [os.path.basename(bf) for bf in builder_files]
            cb = CompositeRingBuilder(rel_builder_files)
            cb.compose().save(self.output_ring)
            self.check_composite_ring(self.output_ring, builders)
            cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')
            rel_cb_file = os.path.basename(cb_file)
            cb.save(rel_cb_file)
            self.check_composite_meta(rel_cb_file, rel_builder_files)
        finally:
            os.chdir(cwd)

    def test_load_errors(self):
        """Missing, malformed or incomplete metadata files fail to load."""
        bad_file = os.path.join(self.tmpdir, 'bad_file.json')
        with self.assertRaises(IOError):
            CompositeRingBuilder.load(bad_file)

        def check_bad_content(content):
            # write in text mode: the sample contents are native strings
            # (binary mode would raise TypeError on py3 before the load
            # under test even ran)
            with open(bad_file, 'w') as fp:
                fp.write(content)
            try:
                with self.assertRaises(ValueError) as cm:
                    CompositeRingBuilder.load(bad_file)
                self.assertIn(
                    "File does not contain valid composite ring data",
                    str(cm.exception))
            except AssertionError as err:
                raise AssertionError('With content %r: %s' % (content, err))

        for content in ('', 'not json', json.dumps({}), json.dumps([])):
            check_bad_content(content)

        good_content = {
            'components': [
                {'version': 1, 'id': 'uuid_x', 'replicas': 12},
                {'version': 2, 'id': 'uuid_y', 'replicas': 12}
            ],
            'builder_files': {'uuid_x': '/path/to/file_x',
                              'uuid_y': '/path/to/file_y'},
            'version': 99}
        # any one missing top-level key invalidates the file
        for missing in good_content:
            bad_content = dict(good_content)
            bad_content.pop(missing)
            check_bad_content(json.dumps(bad_content))

    def test_save_errors(self):
        """save() fails until a ring has actually been composed."""
        cb_file = os.path.join(self.tmpdir, 'test-composite-ring.json')

        def do_test(cb):
            with self.assertRaises(ValueError) as cm:
                cb.save(cb_file)
            self.assertIn("No composed ring to save", str(cm.exception))

        do_test(CompositeRingBuilder())
        do_test(CompositeRingBuilder([]))
        do_test(CompositeRingBuilder(['file1', 'file2']))

    def test_rebalance(self):
        """rebalance() rebalances and saves each component exactly once."""
        @contextmanager
        def mock_rebalance():
            # captures component builder rebalance call results, yields a dict
            # that maps builder -> results
            calls = defaultdict(list)
            orig_func = RingBuilder.rebalance

            def func(builder, **kwargs):
                result = orig_func(builder, **kwargs)
                calls[builder].append(result)
                return result

            with mock.patch('swift.common.ring.RingBuilder.rebalance', func):
                yield calls

        def check_results():
            self.assertEqual(2, len(rebalance_calls))  # 2 builders called
            for calls in rebalance_calls.values():
                self.assertFalse(calls[1:])  # 1 call to each builder

            self.assertEqual(sorted(expected_ids),
                             sorted([b.id for b in rebalance_calls]))
            self.assertEqual(sorted(expected_versions),
                             sorted([b.version for b in rebalance_calls]))
            for b in rebalance_calls:
                self.assertEqual(set(rebalance_calls.keys()),
                                 set(b.parent_builder._builders))

            # check the rebalanced builders were saved
            written_builders = [RingBuilder.load(f) for f in builder_files]
            self.assertEqual(expected_ids,
                             [b.id for b in written_builders])
            self.assertEqual(expected_versions,
                             [b.version for b in written_builders])

            # check returned results, should be in component order
            self.assertEqual(2, len(results))
            self.assertEqual(builder_files,
                             [r['builder_file'] for r in results])
            self.assertEqual(expected_versions,
                             [r['builder'].version for r in results])
            self.assertEqual(expected_ids, [r['builder'].id for r in results])
            self.assertEqual(
                [rebalance_calls[r['builder']][0] for r in results],
                [r['result'] for r in results])

        # N.B. the sample builders have zero min_part_hours
        builders = self.create_sample_ringbuilders(2)
        expected_versions = [b.version + 1 for b in builders]
        expected_ids = [b.id for b in builders]

        # test rebalance loads component builders
        builder_files = self.save_builders(builders)
        cb = CompositeRingBuilder(builder_files)
        with mock_rebalance() as rebalance_calls:
            results = cb.rebalance()
        check_results()

        # test loading builder files via load_components
        # revert builder files to original builder state
        builder_files = self.save_builders(builders)
        cb = CompositeRingBuilder()
        cb.load_components(builder_files)
        with mock_rebalance() as rebalance_calls:
            results = cb.rebalance()
        check_results()

    def test_rebalance_errors(self):
        """Errors during component rebalance invalidate loaded builders."""
        cb = CompositeRingBuilder()
        with self.assertRaises(ValueError) as cm:
            cb.rebalance()
        self.assertIn('Two or more component builders are required',
                      str(cm.exception))

        builders = self.create_sample_ringbuilders(2)
        cb, builder_files = self._make_composite_builder(builders)
        # shuffle is patched to the identity so the first component is
        # deterministically the one that fails
        with mock.patch('swift.common.ring.RingBuilder.rebalance',
                        side_effect=RingBuilderError('test')):
            with mock.patch('swift.common.ring.composite_builder.shuffle',
                            lambda x: x):
                with self.assertRaises(RingBuilderError) as cm:
                    cb.rebalance()

        self.assertIn('An error occurred while rebalancing component %s' %
                      builder_files[0], str(cm.exception))
        self.assertIsNone(cb._builders)

        with mock.patch('swift.common.ring.RingBuilder.validate',
                        side_effect=RingBuilderError('test')):
            with mock.patch('swift.common.ring.composite_builder.shuffle',
                            lambda x: x):
                with self.assertRaises(RingBuilderError) as cm:
                    cb.rebalance()

        self.assertIn('An error occurred while rebalancing component %s' %
                      builder_files[0], str(cm.exception))
        self.assertIsNone(cb._builders)

    def test_rebalance_with_unrebalanced_builders(self):
        """Un-rebalanced components can be rebalanced then composed."""
        # create 2 non-rebalanced rings
        builders = self.create_sample_ringbuilders(rebalance=False)
        # save builders
        builder_files = self.save_builders(builders)
        cb = CompositeRingBuilder(builder_files)
        # sanity, it is impossible to compose un-rebalanced component rings
        with self.assertRaises(ValueError) as cm:
            cb.compose()
        self.assertIn("Builder needs rebalance", str(cm.exception))
        # but ok to compose after rebalance
        cb.rebalance()
        rd = cb.compose()
        rd.save(self.output_ring)
        rebalanced_builders = [RingBuilder.load(f) for f in builder_files]
        self.check_composite_ring(self.output_ring, rebalanced_builders)
class TestLoadComponents(BaseTestCompositeBuilder):
# Tests for the loading of component builders.
def _call_method_under_test(self, cb, *args, **kwargs):
# Component builder loading is triggered by the load_components method
# and the compose method. This method provides a hook for subclasses to
# configure a different method to repeat the component loading tests.
cb.load_components(*args, **kwargs)
def test_load_components(self):
builders = self.create_sample_ringbuilders(2)
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder(builder_files)
# check lazy loading
self.assertEqual(builder_files, cb._builder_files)
self.assertFalse(cb._builders) # none loaded yet
# check loading configured files
self._call_method_under_test(cb)
self.assertEqual(builder_files, cb._builder_files)
for i, builder in enumerate(cb._builders):
self.assertEqual(builders[i].id, builder.id)
self.assertEqual(builders[i].devs, builder.devs)
# modify builders and save in different files
self.add_dev_and_rebalance(builders[0])
other_files = self.save_builders(builders, prefix='other')
# reload from other files
self._call_method_under_test(cb, other_files)
self.assertEqual(other_files, cb._builder_files)
for i, builder in enumerate(cb._builders):
self.assertEqual(builders[i].id, builder.id)
self.assertEqual(builders[i].devs, builder.devs)
# modify builders again and save in same files
self.add_dev_and_rebalance(builders[1])
self.save_builders(builders, prefix='other')
# reload from same files
self._call_method_under_test(cb)
self.assertEqual(other_files, cb._builder_files)
for i, builder in enumerate(cb._builders):
self.assertEqual(builders[i].id, builder.id)
self.assertEqual(builders[i].devs, builder.devs)
def test_load_components_insufficient_builders(self):
def do_test(builder_files, force):
cb = CompositeRingBuilder(builder_files)
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files,
force=force)
self.assertIn('Two or more component builders are required',
cm.exception.message)
cb = CompositeRingBuilder()
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files,
force=force)
self.assertIn('Two or more component builders are required',
cm.exception.message)
builders = self.create_sample_ringbuilders(3)
builder_files = self.save_builders(builders)
do_test([], force=False)
do_test([], force=True) # this error is never ignored
do_test(builder_files[:1], force=False)
do_test(builder_files[:1], force=True) # this error is never ignored
def test_load_components_missing_builder_id(self):
def check_missing_id(cb, builders):
# not ok to load builder_files that have no id assigned
orig_version = cb.version
no_id = random.randint(0, len(builders) - 1)
# rewrite the builder files so that one has missing id
builder_files = self.save_builders(builders, missing_ids=[no_id])
def do_check(force):
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files,
force=force)
error_lines = cm.exception.message.split('\n')
self.assertIn("Problem with builder at index %s" % no_id,
error_lines[0])
self.assertIn("id attribute has not been initialised",
error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version, cb.version)
do_check(False)
do_check(True) # we never ignore this error
# check with compose not previously called, cb has no existing metadata
builders = self.create_sample_ringbuilders(3)
cb = CompositeRingBuilder()
check_missing_id(cb, builders)
# now save good copies of builders and compose so this cb has
# existing component metadata
builder_files = self.save_builders(builders)
cb = CompositeRingBuilder(builder_files)
cb.compose() # cb now has component metadata
check_missing_id(cb, builders)
def test_load_components_duplicate_builder_ids(self):
builders = self.create_sample_ringbuilders(3)
builders[2]._id = builders[0]._id
cb = CompositeRingBuilder(self.save_builders(builders))
def do_check(force):
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, force=force)
error_lines = cm.exception.message.split('\n')
self.assertIn("Builder id %r used at indexes 0, 2" %
builders[0].id, error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(0, cb.version)
do_check(False)
do_check(True)
def test_load_components_unchanged_builders(self):
def do_test(cb, builder_files, **kwargs):
orig_version = cb.version
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files, **kwargs)
error_lines = cm.exception.message.split('\n')
self.assertIn("None of the component builders has been modified",
error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version, cb.version)
builders = self.create_sample_ringbuilders(2)
cb, builder_files = self._make_composite_builder(builders)
# ok to load same *unchanged* builders
self._call_method_under_test(cb, builder_files)
# unless require_modified is set
do_test(cb, builder_files, require_modified=True)
# even if we rewrite the files
builder_files = self.save_builders(builders)
do_test(cb, builder_files, require_modified=True)
# even if we rename the files
builder_files = self.save_builders(builders, prefix='other')
do_test(cb, builder_files, require_modified=True)
# force trumps require_modified
self._call_method_under_test(cb, builder_files, force=True,
require_modified=True)
def test_load_components_older_builder(self):
# make first version of composite ring
builders = self.create_sample_ringbuilders(2)
cb, builder_files = self._make_composite_builder(builders)
old_builders = [copy.deepcopy(b) for b in builders]
# update components and reload
for i, b in enumerate(builders):
self.add_dev_and_rebalance(b)
self.assertLess(old_builders[i].version, b.version)
self.save_builders(builders)
self._call_method_under_test(cb)
orig_version = cb.version
cb.compose() # compose with newer builder versions
self.assertEqual(orig_version + 1, cb.version) # sanity check
# not ok to use old versions of same builders
self.save_builders([old_builders[0], builders[1]])
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb)
error_lines = cm.exception.message.split('\n')
self.assertIn("Invalid builder change at index 0", error_lines[0])
self.assertIn("Older builder version", error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version + 1, cb.version)
# not even if one component ring has changed
self.add_dev_and_rebalance(builders[1])
self.save_builders([old_builders[0], builders[1]])
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb)
error_lines = cm.exception.message.split('\n')
self.assertIn("Invalid builder change at index 0", error_lines[0])
self.assertIn("Older builder version", error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(orig_version + 1, cb.version)
self.assertIsNone(cb._builders)
# unless we ignore errors
self._call_method_under_test(cb, force=True)
self.assertEqual(old_builders[0].version, cb._builders[0].version)
def test_load_components_different_number_builders(self):
# not ok to use a different number of component rings
builders = self.create_sample_ringbuilders(4)
def do_test(bad_builders):
cb, builder_files = self._make_composite_builder(builders[:3])
# expect an error
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(
cb, self.save_builders(bad_builders))
error_lines = cm.exception.message.split('\n')
self.assertFalse(error_lines[1:])
self.assertEqual(1, cb.version)
# unless we ignore errors
self._call_method_under_test(cb, self.save_builders(bad_builders),
force=True)
self.assertEqual(len(bad_builders), len(cb._builders))
return error_lines
error_lines = do_test(builders[:2]) # too few
self.assertIn("Missing builder at index 2", error_lines[0])
error_lines = do_test(builders) # too many
self.assertIn("Unexpected extra builder at index 3", error_lines[0])
def test_load_components_different_builders(self):
# not ok to change component rings
builders = self.create_sample_ringbuilders(3)
cb, builder_files = self._make_composite_builder(builders[:2])
# ensure builder[0] is newer version so that's not the problem
self.add_dev_and_rebalance(builders[0])
different_files = self.save_builders([builders[0], builders[2]])
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, different_files)
error_lines = cm.exception.message.split('\n')
self.assertIn("Invalid builder change at index 1", error_lines[0])
self.assertIn("Attribute mismatch for id", error_lines[0])
self.assertFalse(error_lines[1:])
self.assertEqual(1, cb.version)
# ok if we force
self._call_method_under_test(cb, different_files, force=True)
self.assertEqual(different_files, cb._builder_files)
def test_load_component_different_builder_order(self):
# not ok to change order of component rings
builders = self.create_sample_ringbuilders(4)
cb, builder_files = self._make_composite_builder(builders)
builder_files.reverse()
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb, builder_files)
error_lines = cm.exception.message.split('\n')
for i, line in enumerate(error_lines):
self.assertIn("Invalid builder change at index %s" % i, line)
self.assertIn("Attribute mismatch for id", line)
self.assertEqual(1, cb.version)
# ok if we force
self._call_method_under_test(cb, builder_files, force=True)
self.assertEqual(builder_files, cb._builder_files)
def test_load_components_replica_count_changed(self):
# not ok to change the number of replicas in a ring
builders = self.create_sample_ringbuilders(3)
cb, builder_files = self._make_composite_builder(builders)
builders[0].set_replicas(4)
self.save_builders(builders)
with self.assertRaises(ValueError) as cm:
self._call_method_under_test(cb)
error_lines = cm.exception.message.split('\n')
for i, line in enumerate(error_lines):
self.assertIn("Invalid builder change at index 0", line)
self.assertIn("Attribute mismatch for replicas", line)
self.assertEqual(1, cb.version)
# ok if we force
self._call_method_under_test(cb, force=True)
class TestComposeLoadComponents(TestLoadComponents):
    """Repeat the load_components tests, driving them through compose()."""

    def _call_method_under_test(self, cb, *args, **kwargs):
        cb.compose(*args, **kwargs)

    def test_load_components_replica_count_changed(self):
        # For compose method this test differs from superclass when the force
        # flag is used, because although the force flag causes load_components
        # to skip checks, the actual ring composition fails.

        # not ok to change the number of replicas in a ring
        builders = self.create_sample_ringbuilders(3)
        cb, builder_files = self._make_composite_builder(builders)
        builders[0].set_replicas(4)
        self.save_builders(builders)
        with self.assertRaises(ValueError) as cm:
            self._call_method_under_test(cb)
        # str(exc) rather than the py2-only exc.message attribute
        error_lines = str(cm.exception).split('\n')
        for i, line in enumerate(error_lines):
            self.assertIn("Invalid builder change at index 0", line)
            self.assertIn("Attribute mismatch for replicas", line)
        self.assertEqual(1, cb.version)
        # if we force, then load_components succeeds but the compose pre
        # validate will fail because the builder needs rebalancing
        with self.assertRaises(ValueError) as cm:
            self._call_method_under_test(cb, force=True)
        error_lines = str(cm.exception).split('\n')
        self.assertIn("Problem with builders", error_lines[0])
        self.assertIn("Builder needs rebalance", error_lines[1])
        self.assertFalse(error_lines[2:])
        self.assertEqual(1, cb.version)
class TestCooperativeRingBuilder(BaseTestCompositeBuilder):
    """Tests for CooperativeRingBuilder's cross-builder part-move limits."""

    def _make_coop_builder(self, region, composite_builder, rebalance=False):
        # Build a small (2**8 parts, 3 replicas, 1 min-part-hour) cooperative
        # builder with three devices in the given region; optionally balance.
        rb = CooperativeRingBuilder(8, 3, 1, composite_builder)
        if composite_builder._builders is None:
            composite_builder._builders = [rb]
        for _ in range(3):
            self.add_dev(rb, region=region)
        if rebalance:
            rb.rebalance()
            self.assertEqual(self._partition_counts(rb),
                             {0: 256, 1: 256, 2: 256})  # sanity check
        return rb

    def _partition_counts(self, builder):
        """
        Returns a dictionary mapping device id's to (number of
        partitions assigned to that device).
        """
        return Counter(builder.devs[dev_id]['id']
                       for part2dev_id in builder._replica2part2dev
                       for dev_id in part2dev_id)

    @mock.patch('swift.common.ring.builder.time')
    def test_rebalance_respects_cobuilder_part_moves(self, mock_time):
        def do_rebalance(builder):
            # rebalance and return (num moved, set of moved part numbers)
            old_part_devs = [builder._devs_for_part(part)
                             for part in range(builder.parts)]
            num_moved, _, _ = builder.rebalance()
            moved_parts = {
                p for p in range(builder.parts)
                if old_part_devs[p] != builder._devs_for_part(p)}
            self.assertEqual(len(moved_parts), num_moved)  # sanity check
            return num_moved, moved_parts

        def num_parts_can_move(builder):
            # note that can_part_move() gives consideration to the
            # _part_moved_bitmap which is only reset when a rebalance starts
            return len(
                [p for p in range(builder.parts)
                 if super(CooperativeRingBuilder, builder)._can_part_move(p)])

        mock_time.return_value = 0
        cb = CompositeRingBuilder()
        rb1 = self._make_coop_builder(1, cb)
        rb2 = self._make_coop_builder(2, cb)
        rb3 = self._make_coop_builder(3, cb)
        cb._builders = [rb1, rb2, rb3]
        # all cobuilders can perform initial rebalance
        for rb in (rb1, rb2, rb3):
            rb.rebalance()
            actual = self._partition_counts(rb)
            exp = {0: 256, 1: 256, 2: 256}
            self.assertEqual(exp, actual,
                             'Expected %s but got %s for region %s' %
                             (exp, actual, next(rb._iter_devs())['region']))
        # jump forwards min_part_hours, both builders can move all parts
        mock_time.return_value = 3600
        self.add_dev(rb1)
        # sanity checks: rb1 and rb2 are both ready for rebalance
        self.assertEqual(0, rb2.min_part_seconds_left)
        self.assertEqual(0, rb1.min_part_seconds_left)
        # ... but last_part_moves not yet updated to current epoch
        self.assertEqual(0, num_parts_can_move(rb1))
        self.assertEqual(0, num_parts_can_move(rb2))
        # rebalancing rb1 will update epoch for both builders' last_part_moves
        num_moved, rb1_parts_moved = do_rebalance(rb1)
        self.assertEqual(192, num_moved)
        self.assertEqual(self._partition_counts(rb1),
                         {0: 192, 1: 192, 2: 192, 3: 192})
        # N.B. num_parts_can_move gathers super class's (i.e. RingBuilder)
        # _can_part_move so that it doesn't refer cobuilders state.
        self.assertEqual(256, num_parts_can_move(rb2))
        self.assertEqual(64, num_parts_can_move(rb1))
        # rebalancing rb2 - rb2 in isolation could potentially move all parts
        # so would move 192 parts to new device, but it is constrained by rb1
        # only having 64 parts that can move
        self.add_dev(rb2)
        num_moved, rb2_parts_moved = do_rebalance(rb2)
        self.assertEqual(64, num_moved)
        counts = self._partition_counts(rb2)
        self.assertEqual(counts[3], 64)
        # index the original dev ids explicitly: dict views aren't sliceable
        # on py3 (was: sorted(counts.values()[:3]))
        self.assertEqual([234, 235, 235],
                         sorted(counts[dev_id] for dev_id in (0, 1, 2)))
        self.assertFalse(rb2_parts_moved.intersection(rb1_parts_moved))
        self.assertEqual(192, num_parts_can_move(rb2))
        self.assertEqual(64, num_parts_can_move(rb1))
        # rb3 can't rebalance - all parts moved while rebalancing rb1 and rb2
        self.add_dev(rb3)
        num_moved, rb3_parts_moved = do_rebalance(rb3)
        self.assertEqual(0, num_moved)
        # jump forwards min_part_hours, both builders can move all parts again,
        # so now rb2 should be able to further rebalance
        mock_time.return_value = 7200
        do_rebalance(rb2)
        self.assertGreater(self._partition_counts(rb2)[3], 64)
        self.assertLess(num_parts_can_move(rb2), 256)
        self.assertEqual(256, num_parts_can_move(rb1))  # sanity check
        # but cobuilders will not prevent a rb rebalancing for first time
        rb4 = self._make_coop_builder(4, cb, rebalance=False)
        cb._builders.append(rb4)
        num_moved, _, _ = rb4.rebalance()
        self.assertEqual(3 * 256, num_moved)

    def test_rebalance_cobuilders(self):
        # verify that co-builder methods are called during one builder's
        # rebalance
        @contextmanager
        def mock_update_last_part_moves():
            # intercept calls to RingBuilder._update_last_part_moves (yes, the
            # superclass method) and record each builder it was called on
            calls = []
            orig_func = RingBuilder._update_last_part_moves

            def fake_update(builder):
                calls.append(builder)
                return orig_func(builder)

            with mock.patch(
                    'swift.common.ring.RingBuilder._update_last_part_moves',
                    fake_update):
                yield calls

        @contextmanager
        def mock_can_part_move():
            # intercept calls to RingBuilder._can_part_move (yes, the
            # superclass method) and map each builder to the list of parts it
            # was asked about
            calls = defaultdict(list)
            orig_func = RingBuilder._can_part_move

            def fake_can_part_move(builder, part):
                calls[builder].append(part)
                return orig_func(builder, part)
            with mock.patch('swift.common.ring.RingBuilder._can_part_move',
                            fake_can_part_move):
                yield calls

        # single component builder in parent builder
        cb = CompositeRingBuilder()
        rb1 = self._make_coop_builder(1, cb)
        with mock_update_last_part_moves() as update_calls:
            with mock_can_part_move() as can_part_move_calls:
                rb1.rebalance()
        self.assertEqual([rb1], update_calls)
        # list(...) because py3 dict views don't compare equal to lists
        self.assertEqual([rb1], list(can_part_move_calls.keys()))
        self.assertEqual(512, len(can_part_move_calls[rb1]))

        # two component builders with same parent builder
        cb = CompositeRingBuilder()
        rb1 = self._make_coop_builder(1, cb)
        rb2 = self._make_coop_builder(2, cb)
        cb._builders = [rb1, rb2]
        with mock_update_last_part_moves() as update_calls:
            with mock_can_part_move() as can_part_move_calls:
                rb2.rebalance()
        # both builders get updated; builders aren't orderable on py3 so
        # compare as multisets via a key=id sort
        self.assertEqual(sorted([rb1, rb2], key=id),
                         sorted(update_calls, key=id))
        # rb1 has never been rebalanced so no calls propagate from its
        # can_part_move method to its superclass _can_part_move method
        self.assertEqual([rb2], list(can_part_move_calls.keys()))

        with mock_update_last_part_moves() as update_calls:
            with mock_can_part_move() as can_part_move_calls:
                rb1.rebalance()
        # both builders get updated
        self.assertEqual(sorted([rb1, rb2], key=id),
                         sorted(update_calls, key=id))
        # rb1 is being rebalanced so gets checked, and rb2 also gets checked
        self.assertEqual(sorted([rb1, rb2], key=id),
                         sorted(can_part_move_calls, key=id))
        self.assertEqual(512, len(can_part_move_calls[rb1]))
        self.assertEqual(512, len(can_part_move_calls[rb2]))

    def test_save_then_load(self):
        """A cooperative builder round-trips through save()/load()."""
        cb = CompositeRingBuilder()
        coop_rb = self._make_coop_builder(1, cb, rebalance=True)
        builder_file = os.path.join(self.tmpdir, 'test.builder')
        coop_rb.save(builder_file)
        cb = CompositeRingBuilder()
        loaded_coop_rb = CooperativeRingBuilder.load(builder_file,
                                                     parent_builder=cb)
        self.assertIs(cb, loaded_coop_rb.parent_builder)
        self.assertEqual(coop_rb.to_dict(), loaded_coop_rb.to_dict())
        # check can be loaded as superclass
        loaded_rb = RingBuilder.load(builder_file)
        self.assertEqual(coop_rb.to_dict(), loaded_rb.to_dict())
        # check can load a saved superclass
        rb = RingBuilder(6, 3, 0)
        for _ in range(3):
            self.add_dev(rb, region=1)
        rb.save(builder_file)
        cb = CompositeRingBuilder()
        loaded_coop_rb = CooperativeRingBuilder.load(builder_file,
                                                     parent_builder=cb)
        self.assertIs(cb, loaded_coop_rb.parent_builder)
        self.assertEqual(rb.to_dict(), loaded_coop_rb.to_dict())
# Allow running this test module directly with the stdlib test runner.
if __name__ == '__main__':
    unittest.main()
| |
#
# Copyright (c) 2015 Juniper Networks, Inc. All rights reserved.
#
#!/usr/bin/python
doc = """\
Node manager listens to process state change events and
other flag value change events to provide advanced service
management functionality.
Rules files looks like following:
====================
{ "Rules": [
{"process_name": "contrail-query-engine",
"process_state": "PROCESS_STATE_FATAL",
"action": "supervisorctl -s http://localhost:9002 """ + \
"""\stop contrail-analytics-api"},
{"process_name": "contrail-query-engine",
"process_state": "PROCESS_STATE_STOPPED",
"action": "supervisorctl -s http://localhost:9002 """ + \
"""\stop contrail-analytics-api"},
{"processname": "contrail-collector",
"process_state": "PROCESS_STATE_RUNNING",
"action": "/usr/bin/echo collector is starting >> /tmp/log"},
{"flag_name": "test", "flag_value":"true",
"action": "/usr/bin/echo flag test is set true >> /tmp/log.1"}
]
}
====================
"""
from gevent import monkey
monkey.patch_all()
import argparse
import ConfigParser
import gevent
import hashlib
import random
import os
import platform
import signal
import sys
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from pysandesh.sandesh_base import Sandesh, SandeshConfig
from analytics_nodemgr.event_manager import AnalyticsEventManager
from config_nodemgr.event_manager import ConfigEventManager
from control_nodemgr.event_manager import ControlEventManager
from analytics_database_nodemgr.event_manager import AnalyticsDatabaseEventManager
from config_database_nodemgr.event_manager import ConfigDatabaseEventManager
from vrouter_nodemgr.event_manager import VrouterEventManager
# Mapping from node type (the --nodetype argument) to the list of unit /
# process names that nodemgr supervises for that node type.
unit_names_dict = {
    'contrail-analytics': [
        'contrail-collector',
        'contrail-analytics-api',
        'contrail-snmp-collector',
        'contrail-query-engine',
        'contrail-alarm-gen',
        'contrail-topology',
        'contrail-analytics-nodemgr'
    ],
    'contrail-config': [
        'contrail-api',
        'contrail-schema',
        'contrail-svc-monitor',
        'contrail-device-manager',
        'contrail-config-nodemgr'
    ],
    'contrail-config-database': [
        'cassandra',
        'zookeeper',
        'contrail-config-database-nodemgr'
    ],
    'contrail-control': [
        'contrail-control',
        'contrail-dns',
        'contrail-named',
        'contrail-control-nodemgr'
    ],
    'contrail-vrouter': [
        'contrail-vrouter-agent',
        'contrail-vrouter-nodemgr'
    ],
    'contrail-database': [
        'cassandra',
        'zookeeper',
        'kafka',
        'contrail-database-nodemgr'
    ]
}
def usage():
    """Print the module usage text and exit with a non-zero status."""
    # print() call form works under both python2 and python3
    # (the original `print doc` statement is a syntax error on python3)
    print(doc)
    sys.exit(255)
def main(args_str=' '.join(sys.argv[1:])):
    """Parse node type, config file and CLI options, then run the matching
    event manager until terminated.

    NOTE: the default for args_str is evaluated once at import time from
    sys.argv; pass an explicit string to override (e.g. for tests).
    """
    # Parse Arguments
    node_parser = argparse.ArgumentParser(add_help=False)
    node_parser.add_argument("--nodetype",
                            default='contrail-analytics',
                            help='Type of node which nodemgr is managing')
    try:
        args, remaining_argv = node_parser.parse_known_args(args_str.split())
    except SystemExit:
        # argparse reports bad arguments via SystemExit; show usage instead
        usage()
    # Built-in defaults; config-file values and CLI flags override these.
    default = {'rules': '',
               'collectors': [],
               'hostip': '127.0.0.1',
               'db_port': '9042',
               'db_jmx_port': '7199',
               'db_user': None,
               'db_password': None,
               'minimum_diskgb': 256,
               'corefile_path': '/var/crashes',
               'cassandra_repair_interval': 24,
               'cassandra_repair_logdir': '/var/log/contrail/',
               'log_local': False,
               'log_level': SandeshLevel.SYS_DEBUG,
               'log_category': '',
               'log_file': Sandesh._DEFAULT_LOG_FILE,
               'use_syslog': False,
               'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY
               }
    default.update(SandeshConfig.get_default_options(['DEFAULTS']))
    sandesh_opts = SandeshConfig.get_default_options()
    node_type = args.nodetype
    if platform.system() == 'Windows':
        path_prefix = os.environ['ProgramData'] + '/Contrail'
    else:
        path_prefix = ""
    # node type -> per-node-type config file (replaces the old if/elif chain)
    config_file_by_type = {
        'contrail-analytics': '/etc/contrail/contrail-analytics-nodemgr.conf',
        'contrail-config': '/etc/contrail/contrail-config-nodemgr.conf',
        'contrail-config-database':
            '/etc/contrail/contrail-config-database-nodemgr.conf',
        'contrail-control': '/etc/contrail/contrail-control-nodemgr.conf',
        'contrail-vrouter': '/etc/contrail/contrail-vrouter-nodemgr.conf',
        'contrail-database': '/etc/contrail/contrail-database-nodemgr.conf',
    }
    if node_type not in config_file_by_type:
        # fixed: missing space after "type" in the original message
        sys.stderr.write("Node type " + str(node_type) + " is incorrect\n")
        return
    config_file = path_prefix + config_file_by_type[node_type]
    if not os.path.exists(config_file):
        sys.stderr.write("config file " + config_file + " is not present\n")
        return
    config = ConfigParser.SafeConfigParser()
    config.read([config_file])
    if 'DEFAULTS' in config.sections():
        default.update(dict(config.items('DEFAULTS')))
    if 'COLLECTOR' in config.sections():
        try:
            collector = config.get('COLLECTOR', 'server_list')
            default['collectors'] = collector.split()
        except ConfigParser.NoOptionError:
            # no configured collectors is acceptable
            pass
    SandeshConfig.update_options(sandesh_opts, config)
    parser = argparse.ArgumentParser(parents=[node_parser],
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    default.update(sandesh_opts)
    parser.set_defaults(**default)
    parser.add_argument("--rules",
                        help='Rules file to use for processing events')
    parser.add_argument("--collectors",
                        nargs='+',
                        help='Collector addresses in format' +
                             'ip1:port1 ip2:port2')
    parser.add_argument("--log_local", action="store_true",
                        help="Enable local logging of sandesh messages")
    parser.add_argument("--log_level",
                        help="Severity level for local logging of sandesh messages")
    parser.add_argument("--log_category",
                        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--corefile_path",
                        help="Location where coredump files are stored")
    SandeshConfig.add_parser_arguments(parser, add_dscp=True)
    # database node types take extra cassandra-related options
    if (node_type == 'contrail-database'
            or node_type == 'contrail-config-database'):
        parser.add_argument("--minimum_diskGB",
                            type=int,
                            dest='minimum_diskgb',
                            help="Minimum disk space in GB's")
        parser.add_argument("--hostip",
                            help="IP address of host")
        parser.add_argument("--db_port",
                            help="Cassandra DB cql port")
        parser.add_argument("--db_jmx_port",
                            help="Cassandra DB jmx port")
        parser.add_argument("--db_user",
                            help="Cassandra DB cql username")
        parser.add_argument("--db_password",
                            help="Cassandra DB cql password")
        parser.add_argument("--cassandra_repair_interval", type=int,
                            help="Time in hours to periodically run "
                                 "nodetool repair for cassandra maintenance")
        parser.add_argument("--cassandra_repair_logdir",
                            help="Directory for storing repair logs")
    try:
        _args = parser.parse_args(remaining_argv)
    except SystemExit:
        usage()
    # randomize collector list; chksum lets the SIGHUP handler detect change
    _args.chksum = ""
    # fixed: always initialise random_collectors so the unconditional read
    # below cannot raise AttributeError when no collectors are configured
    _args.random_collectors = []
    if _args.collectors:
        _args.chksum = hashlib.md5("".join(_args.collectors)).hexdigest()
        _args.random_collectors = random.sample(_args.collectors,
                                                len(_args.collectors))
        _args.collectors = _args.random_collectors
    # done parsing arguments
    # TODO: restore rule_file logic somehow if needed for microservices
    # rule_file = _args.rules
    unit_names = unit_names_dict.get(node_type)
    # node type -> event manager class (key set mirrors config_file_by_type)
    manager_by_type = {
        'contrail-analytics': AnalyticsEventManager,
        'contrail-config': ConfigEventManager,
        'contrail-control': ControlEventManager,
        'contrail-vrouter': VrouterEventManager,
        'contrail-database': AnalyticsDatabaseEventManager,
        'contrail-config-database': ConfigDatabaseEventManager,
    }
    if node_type not in manager_by_type:
        sys.stderr.write("Node type " + str(node_type) + " is incorrect\n")
        return
    prog = manager_by_type[node_type](_args, unit_names)
    prog.send_nodemgr_process_status()
    prog.send_process_state_db(prog.group_names)
    prog.config_file = config_file
    prog.collector_chksum = _args.chksum
    prog.random_collectors = _args.random_collectors
    if platform.system() != 'Windows':
        """ @sighup
        Reconfig of collector list
        """
        gevent.signal(signal.SIGHUP, prog.nodemgr_sighup_handler)
    # fixed: spawn run_periodically as a greenlet with its arguments;
    # the original called it inline and spawned its return value
    gevent.joinall([gevent.spawn(prog.runforever),
                    gevent.spawn(prog.run_periodically,
                                 prog.do_periodic_events, 60)])
# Script entry point.
if __name__ == '__main__':
    main()
| |
import datetime
import html
import json
import logging
import os
import random
import re
import threading
import time
import accounts
import args
import captcha
import config
import log
import stats
import vkapi
import storage
from bot_response import ResponseType, BotResponse
from cache import UserCache, ConfCache, MessageCache
from check_friend import FriendController
from thread_manager import ThreadManager, Timeline
from vkapi import MessageReceiver, CONF_START
from vkbot_message import VkbotMessage, PeerInfo
from voice import VoiceRecognizer
# VK API errors that should not be treated as fatal.
# Maps (error_code, api_method) to how the error is handled:
#   None                 -> ignore silently
#   (message, can_retry) -> report `message` (a string, or a callable taking
#                           the request params and method name); retry if True
# NOTE(review): '*' presumably acts as a method wildcard - confirm in vkapi.
ignored_errors = {
    # (code, method): (message, can_retry)
    (900, 'messages.send'): ('Blacklisted', False),
    (902, 'messages.send'): ('Unable to reply', False),
    (7, 'messages.send'): ('Banned', False),
    (10, 'messages.send'): ('Unable to reply', True),
    (15, 'friends.delete'): None,
    (15, 'messages.setActivity'): None,
    (917, 'messages.setActivity'): None,
    (100, 'messages.markAsRead'): None,
    (100, 'messages.getHistory'): ('Unable to get message history', True),
    (113, 'users.get'): None,
    (100, 'messages.removeChatUser'): ('Unable to leave', False),
    (15, 'messages.removeChatUser'): ('Already kicked', False),
    (8, '*'): (lambda p, m: '{}: error code 8'.format(m), True),
    (10, '*'): (lambda p, m: '{}: error code 10'.format(m), True),
    (100, 'messages.getChat'): None,
}
def createCaptchaHandler():
    """Build a CaptchaHandler configured from the 'captcha.*' settings."""
    params = {}
    params['antigate_key'] = config.get('captcha.antigate_key')
    params['png_filename'] = accounts.getFile('captcha.png')
    params['txt_filename'] = accounts.getFile('captcha.txt')
    params['checks_before_antigate'] = config.get('captcha.checks_before_antigate', 'i')
    params['check_interval'] = config.get('captcha.check_interval', 'i')
    return captcha.CaptchaHandler(params)
def _getFriendControllerParams():
    """Return the 'friend_checks' config section as a plain dict."""
    section = config.get('friend_checks')
    params = {}
    for key in section:
        params[key] = section.get(key)
    return params
def createFriendController():
    """Build a FriendController from config plus the allowed-users file."""
    return FriendController(_getFriendControllerParams(),
                            accounts.getFile('allowed.txt'))
def createVkApi(username, password, ignored_errors=None):
    """Create a VkApi instance and preset its login parameters.

    The api object is returned without logging in; the credentials and
    requested permission mask are stored in login_params for later use.
    """
    errors = ignored_errors if ignored_errors else {}
    log_file = accounts.getFile('inf.log') if args.args['logging'] else ''
    api = vkapi.VkApi(ignored_errors=errors,
                      timeout=config.get('vkbot_timing.default_timeout', 'i'),
                      token_file=accounts.getFile('token.txt'),
                      log_file=log_file,
                      captcha_handler=createCaptchaHandler())
    from vkapi.auth import perms
    requested_perms = (perms.FRIENDS | perms.MESSAGES | perms.WALL
                       | perms.OFFLINE | perms.NOTIFICATIONS | perms.PHOTOS
                       | perms.VIDEO | perms.GROUPS)
    api.login_params = {
        'username': username,
        'password': password,
        'client_id': config.get('vkapi.client_id'),
        'perms': requested_perms,
    }
    return api
class TimeTracker:
    """Sliding window over the timestamps of the last `size` hits.

    overload() reports whether the whole window filled up within the last
    `delay` seconds. Slots are zero until `size` hits have been recorded.
    """

    def __init__(self, size, delay):
        # oldest timestamp first; zeros act as "infinitely long ago"
        self.times = [0] * size
        self.delay = delay

    def hit(self):
        """Record the current time, dropping the oldest timestamp."""
        now = time.time()
        self.times = self.times[1:] + [now]

    def overload(self):
        """True if the oldest hit in the window is younger than `delay`."""
        age_of_oldest = time.time() - self.times[0]
        return age_of_oldest < self.delay
class VkBot:
    # extra profile fields requested whenever users are fetched into the cache
    fields = 'sex,blacklisted,blacklisted_by_me'
def __init__(self, username='', password='', get_dialogs_interval=60):
self.delay_on_reply = config.get('vkbot_timing.delay_on_reply', 'i')
self.chars_per_second = config.get('vkbot_timing.chars_per_second', 'i')
self.same_user_interval = config.get('vkbot_timing.same_user_interval', 'i')
self.same_conf_interval = config.get('vkbot_timing.same_conf_interval', 'i')
self.forget_interval = config.get('vkbot_timing.forget_interval', 'i')
self.delay_on_first_reply = config.get('vkbot_timing.delay_on_first_reply', 'i')
self.stats_dialog_count = config.get('stats.dialog_count', 'i')
self.no_leave_conf = config.get('vkbot.no_leave_conf', 'b')
self.unfriend_on_invite = config.get('vkbot.unfriend_on_invite', 'b')
self.unfriend_on_create = config.get('vkbot.unfriend_on_create', 'b')
self.leave_created_conf = config.get('vkbot.leave_created_conf', 'b')
self.api = createVkApi(username, password, ignored_errors)
stats.update('logging', bool(self.api.log_file))
self.vars = json.load(open('data/defaultvars.json', encoding='utf-8'))
self.vars['default_bf'] = self.vars['bf']['id']
self.initSelf(True)
# hi java
self.users = UserCache(self.api, self.fields + ',' + FriendController.requiredFields(_getFriendControllerParams()),
config.get('cache.user_invalidate_interval', 'i'))
self.confs = ConfCache(self.api, self.self_id, config.get('cache.conf_invalidate_interval', 'i'))
self.guid = int(time.time() * 5)
self.last_viewed_comment = stats.get('last_comment', 0)
self.good_conf = {}
self.tm = ThreadManager()
self.last_message = MessageCache()
self.tracker = TimeTracker(config.get('vkbot.tracker_message_count', 'i'), config.get('vkbot.tracker_interval', 'i'))
self.tracker_multiplier = config.get('vkbot.tracker_multiplier', 'f')
if config.get('wit.enabled', 'b'):
self.voice_recognizer = VoiceRecognizer(config.get('wit.token'))
else:
self.voice_recognizer = None
self.receiver = MessageReceiver(self.api, get_dialogs_interval,
message_class=lambda *args, **kwargs:
VkbotMessage(*args, self_id=self.self_id,
voice_recognizer=self.voice_recognizer, **kwargs))
self.receiver.longpoll_callback = self.longpollCallback
if os.path.isfile(accounts.getFile('msgdump.json')):
try:
data = json.load(open(accounts.getFile('msgdump.json')))
self.last_message.load(data['cache'])
self.api.longpoll = data['longpoll']
if len(data['tracker']) == len(self.tracker.times):
self.tracker.times = data['tracker']
self.receiver.last_message_id = data['lmid']
except Exception:
logging.exception('Failed to load messages')
os.remove(accounts.getFile('msgdump.json'))
else:
logging.info('Message dump does not exist')
self.bad_conf_title = lambda s: False
self.message_lock = threading.Lock()
self.ignore_proc = lambda user, reson: None
    def initSelf(self, sync=False):
        """Fetch the bot's own profile and populate self_id and self.vars.

        When sync is True the request runs in the calling thread; otherwise
        it runs on a background thread.
        """
        def do():
            try:
                res = self.api.users.get(fields='contacts,relation,bdate')[0]
            except IndexError:
                # users.get returned nothing - re-login and retry.
                # NOTE(review): recursion is unbounded if login keeps failing.
                self.api.login()
                do()
                return
            self.self_id = res['id']
            # keep configured defaults when the profile omits a field
            self.vars['phone'] = res.get('mobile_phone') or self.vars['phone']
            self.vars['name'] = (res['first_name'], res['last_name'])
            self.vars['bf'] = res.get('relation_partner') or self.vars['bf']
            try:
                bdate = res['bdate'].split('.')
                today = datetime.date.today()
                # subtract one if this year's birthday hasn't happened yet
                self.vars['age'] = today.year - int(bdate[2]) - ((today.month, today.day) < (int(bdate[1]), int(bdate[0])))
            except LookupError:
                # birth date hidden or lacks a year - keep the default age
                pass
            if not sync:
                logging.info('My phone: ' + self.vars['phone'])
        if sync:
            do()
        else:
            threading.Thread(target=do).start()
def loadUsers(self, arr, key, clean=False):
users = []
confs = []
for i in arr:
try:
pid = key(i)
if pid <= 0:
continue
if pid > CONF_START:
confs.append(pid - CONF_START)
else:
users.append(pid)
except Exception:
pass
self.users.load(users, clean)
self.confs.load(confs, clean)
def replyOne(self, message: VkbotMessage, gen_reply):
if message.is_my_message:
return
if message.is_chat and not self.checkConf(message.chat_id):
self.replyMessage(BotResponse(message, ResponseType.IGNORE))
return
try:
if self.tm.isBusy(message.peer_id) and not self.tm.get(message.peer_id).attr['unimportant']:
return
except Exception:
return
if message.id < self.last_message.bySender(message.peer_id).get('id', 0):
return
try:
ans = gen_reply(message)
except Exception as e:
logging.exception('local {}: {}'.format(e.__class__.__name__, str(e)))
time.sleep(1)
return
self.replyMessage(ans)
def replyAll(self, gen_reply):
self.tm.gc()
messages = self.receiver.getMessages()
self.loadUsers(messages, lambda x: x.user_id)
self.loadUsers(messages, lambda x: x.peer_id)
for cur in messages:
self.replyOne(cur, gen_reply)
def longpollCallback(self, msg):
    """Handle a raw longpoll event before regular message processing.

    Returns True when the event was fully handled here (service events:
    kicks, invites, renames, conf creation, own outgoing messages); a
    falsy return lets the caller continue with normal handling.
    """
    # we kicked ourselves out of a conf: mark it bad and drop the cache entry
    if msg.opt == {'source_mid': str(self.self_id), 'source_act': 'chat_kick_user', 'from': str(self.self_id)}:
        self.good_conf[msg.sender] = False
        del self.confs[msg.sender - CONF_START]
        return True
    # we were (re)invited: forget the previous good/bad verdict for this conf
    if msg.opt.get('source_mid') == str(self.self_id) and msg.opt.get('source_act') == 'chat_invite_user' and msg.sender in self.good_conf:
        del self.good_conf[msg.sender]
        del self.confs[msg.sender - CONF_START]
        return True
    if msg.opt.get('source_act') == 'chat_title_update':
        # invalidate the cached conf record (title changed)
        del self.confs[msg.sender - CONF_START]
        if not storage.contains('banned', msg.sender):
            logging.info('Conf {} ("{}") renamed into "{}"'.format(msg.sender - CONF_START, msg.opt['source_old_text'], msg.opt['source_text']))
        # leave confs renamed to something matching the bad-title predicate
        if not self.no_leave_conf and not storage.contains('banned', self.confs[msg.sender - CONF_START]['invited_by']) and self.bad_conf_title(msg.opt['source_text']):
            self.leaveConf(msg.sender - CONF_START)
            log.write('conf', self.loggableConf(msg.sender - CONF_START) + ' (name)')
        return True
    # someone else added us to an existing conf
    if msg.opt.get('source_act') == 'chat_invite_user' and msg.opt['source_mid'] == str(self.self_id) and msg.opt['from'] != str(self.self_id):
        self.logSender('%sender% added me to conf "{}" ({})'.format(self.confs[msg.sender - CONF_START]['title'], msg.sender - CONF_START),
                       PeerInfo(int(msg.opt['from'])))
        if self.unfriend_on_invite and not storage.contains('banned', msg.opt['from']):
            self.deleteFriend(int(msg.opt['from']))
    # someone created a conf with us as a member
    if msg.opt.get('source_act') == 'chat_create' and msg.opt['from'] != str(self.self_id):
        self.logSender('%sender% created conf "{}" ({})'.format(self.confs[msg.sender - CONF_START]['title'], msg.sender - CONF_START),
                       PeerInfo(int(msg.opt['from'])))
        is_banned = storage.contains('banned', msg.opt['from'])
        if self.unfriend_on_create and not is_banned:
            self.deleteFriend(int(msg.opt['from']))
        if not self.no_leave_conf and not is_banned and self.bad_conf_title(self.confs[msg.sender - CONF_START]['title']):
            self.leaveConf(msg.sender - CONF_START)
            log.write('conf', self.loggableName(int(msg.opt['from'])) + ', ' + self.loggableConf(msg.sender - CONF_START) + ' (created, name)')
        return True
    if msg.flags & 2:  # out
        # an outgoing non-service message means this peer was already answered
        if not msg.opt.get('source_act'):
            self.tm.terminate(msg.sender)
        return True
    try:
        # a different user wrote into the chat we are composing for: make the
        # pending timeline reply to the original message explicitly
        if 'from' in msg.opt and int(msg.opt['from']) != self.tm.get(msg.sender).attr['user_id'] and not msg.opt.get('source_act'):
            self.tm.get(msg.sender).attr['reply'] = True
    except Exception:
        pass
def sendMessage(self, to, msg, forward=None, sticker_id=None, reply=None):
    """Send a message (text, sticker, forward or reply) to peer `to`.

    Returns the API result, or None when the conf is marked bad.
    """
    if not self.good_conf.get(to, 1):
        # we left (or were kicked from) this conf - never write there
        return
    with self.message_lock:
        self.guid += 1
        time.sleep(1)  # crude global rate limit
        self.tracker.hit()
        params = {'peer_id': to, 'random_id': self.guid}
        if sticker_id:
            params['sticker_id'] = sticker_id
        else:
            params['message'] = msg
            if forward:
                params['forward_messages'] = forward
            elif reply:
                params['reply_to'] = reply
        return self.api.messages.send(**params)
def replyMessage(self, answer: BotResponse):
    """Schedule (or suppress) the reply described by `answer`.

    Builds a Timeline that marks the dialog as read, waits human-like
    delays, optionally simulates typing, and finally sends the
    text/sticker via the local _send helper.  A duplicate answer to the
    same user is "resent" as a forward of the previous message.
    """
    if answer.type == ResponseType.NO_READ:
        return  # pretend we never saw the message
    sender_msg = self.last_message.bySender(answer.peer_id)
    if answer.message_id <= sender_msg.get('id', 0):
        return  # a newer message from this peer was already handled
    if answer.type in (ResponseType.NO_RESPONSE, ResponseType.IGNORE):
        if self.tm.isBusy(answer.peer_id):
            return
        # no reply will be sent - only mark as read, after a believable delay
        if not sender_msg or time.time() - sender_msg['time'] > self.forget_interval:
            tl = Timeline().sleep(self.delay_on_first_reply).do(lambda: self.api.messages.markAsRead(peer_id=answer.peer_id))
            tl.attr['unimportant'] = True
            self.tm.run(answer.peer_id, tl)
        elif answer.type == ResponseType.IGNORE:
            self.api.messages.markAsRead(peer_id=answer.peer_id)
        else:
            tl = Timeline().sleep((self.delay_on_reply - 1) * random.random() + 1).do(lambda: self.api.messages.markAsRead(peer_id=answer.peer_id))
            tl.attr['unimportant'] = True
            self.tm.run(answer.peer_id, tl)
        if answer.type == ResponseType.NO_RESPONSE:
            self.last_message.byUser(answer.user_id)['text'] = answer.message_body
        self.last_message.updateTime(answer.peer_id)
        # remember whom we ignored in this conf, to possibly reply explicitly later
        if answer.peer_id > CONF_START and not answer.message_has_action:
            sender_msg.setdefault('ignored', {})[answer.user_id] = time.time()
        return
    typing_time = 0
    resend = False
    if answer.type == ResponseType.TEXT:
        # emoji-only answers ('&#...') get no typing simulation
        if not answer.text.startswith('&#'):
            typing_time = len(answer.text) / self.chars_per_second
        # identical answer to the same user twice in a row: forward the old message
        if sender_msg.get('reply', '').upper() == answer.text.upper() and sender_msg['user_id'] == answer.user_id:
            logging.info('Resending')
            typing_time = 0
            resend = True
    def _send(attr):
        # if someone else in this conf was ignored recently, reply explicitly
        if not set(sender_msg.get('ignored', [])) <= {answer.user_id}:
            ctime = time.time()
            for uid, ts in sender_msg['ignored'].items():
                if uid != answer.user_id and ctime - ts < self.same_conf_interval * 3:
                    attr['reply'] = True
        try:
            if answer.type == ResponseType.STICKER:
                res = self.sendMessage(answer.peer_id, '', sticker_id=answer.data)
            elif resend:
                res = self.sendMessage(answer.peer_id, '', sender_msg['id'])
            elif attr.get('reply'):
                res = self.sendMessage(answer.peer_id, answer.text, reply=answer.message_id)
            else:
                res = self.sendMessage(answer.peer_id, answer.text)
            if res is None:
                # send failed - drop the cached record so it gets refreshed
                del self.users[answer.peer_id]
                self.logSender('Failed to send a message to %sender%', answer.get_peer_info(), short=True)
                if not answer.is_chat and self.users[answer.user_id].get('blacklisted'):
                    self.ignore_proc(answer.user_id, 'blacklisted me')
                return
            msg = self.last_message.add(answer.peer_id, answer.message_body, answer.user_id, res, answer.text)
            if resend:
                msg['resent'] = True
        except Exception as e:
            logging.exception('thread {}: {}'.format(e.__class__.__name__, str(e)))
    cur_delay = (self.delay_on_reply - 1) * random.random() + 1
    send_time = cur_delay + typing_time
    user_delay = 0
    if sender_msg:
        same_interval = self.same_user_interval if answer.peer_id < CONF_START else self.same_conf_interval
        # slow down further when we are sending too much overall
        if self.tracker.overload():
            same_interval *= self.tracker_multiplier
        user_delay = sender_msg['time'] - time.time() + same_interval
        # can be negative
    tl = Timeline(max(send_time, user_delay))
    if answer.is_chat:
        tl.attr['user_id'] = answer.user_id
    if not sender_msg or time.time() - sender_msg['time'] > self.forget_interval:
        tl.sleep(self.delay_on_first_reply)
        tl.do(lambda: self.api.messages.markAsRead(peer_id=answer.peer_id))
    else:
        tl.sleepUntil(send_time, (self.delay_on_reply - 1) * random.random() + 1)
        tl.do(lambda: self.api.messages.markAsRead(peer_id=answer.peer_id))
    tl.sleep(cur_delay)
    for action in answer.onsend_actions:
        tl.do(action)
        tl.sleep(cur_delay)
    if typing_time:
        tl.doEveryFor(vkapi.utils.TYPING_INTERVAL,
                      lambda: self.api.messages.setActivity(type='typing', peer_id=answer.peer_id), typing_time)
    tl.do(_send, True)
    self.tm.run(answer.peer_id, tl)
def checkConf(self, cid):
    """Return True if conf `cid` may be served; caches the verdict.

    Scans recent history: confs we joined ourselves are good; confs we
    were kicked from, stranger-created confs (when leave_created_conf is
    set) and bad-titled confs are left and marked bad.
    """
    if cid + CONF_START in self.good_conf:
        return self.good_conf[cid + CONF_START]  # cached verdict
    messages = self.api.messages.getHistory(chat_id=cid)['items']
    for i in messages:
        # we invited ourselves -> definitely a conf we chose to be in
        if i.get('action') == 'chat_invite_user' and i['user_id'] == self.self_id and i.get('action_mid') == self.self_id:
            self.good_conf[cid + CONF_START] = True
            return True
        if self.leave_created_conf and i.get('action') == 'chat_create' and not storage.contains('banned', i['user_id']):
            self.leaveConf(cid)
            log.write('conf', self.loggableName(i['user_id']) + ', ' + self.loggableConf(cid) + ' (created)')
            return False
        # we kicked ourselves out earlier -> stay out
        if i.get('action') == 'chat_kick_user' and i['user_id'] == self.self_id and i.get('action_mid') == self.self_id:
            if not storage.contains('banned', self.confs[cid]['invited_by']):
                inviter = self.confs[cid]['invited_by']
                self.leaveConf(cid)
                log.write('conf', (self.loggableName(inviter) if inviter else '(???)') + ', ' + self.loggableConf(cid) + ' (left)')
            return False
    title = self.confs[cid]['title']
    if not self.no_leave_conf and not storage.contains('banned', self.confs[cid]['invited_by']) and self.bad_conf_title(title):
        self.leaveConf(cid)
        log.write('conf', self.loggableConf(cid) + ' (name)')
        return False
    self.good_conf[cid + CONF_START] = True
    return True
def leaveConf(self, cid):
    """Leave chat `cid` and mark it bad; returns False for unknown confs."""
    conf = self.confs[cid]
    if not conf:
        return False
    logging.info('Leaving conf {} ("{}")'.format(cid, conf['title']))
    self.good_conf[cid + CONF_START] = False  # never write there again
    return self.api.messages.removeChatUser(chat_id=cid, user_id=self.self_id)
def addFriends(self, is_good):
    """Process incoming friend requests: accept good ones, decline the rest.

    :param is_good: callable(uid, True) returning None to accept, or a
                    reason string to decline
    """
    data = self.api.friends.getRequests(extended=1)
    self.loadUsers(data['items'], lambda x: x['user_id'], True)
    with self.api.delayed() as dm:
        for req in data['items']:
            uid = req['user_id']
            if self.users[uid].get('blacklisted'):
                dm.friends.delete(user_id=uid)
                continue
            verdict = is_good(uid, True)
            if verdict is None:
                dm.friends.add(user_id=uid)
                self.logSender('Adding %sender%', PeerInfo(uid))
            else:
                dm.friends.delete(user_id=uid)
                self.logSender('Not adding %sender% ({})'.format(verdict), PeerInfo(uid))
def unfollow(self):
    """Withdraw outgoing friend requests (except to banned users) and
    clear all suggestions; returns the list of withdrawn uids."""
    outgoing = self.api.friends.getRequests(out=1)['items']
    suggested = self.api.friends.getRequests(suggested=1)['items']
    result = [uid for uid in outgoing if not storage.contains('banned', uid)]
    with self.api.delayed() as dm:
        for uid in suggested:
            dm.friends.delete(user_id=uid)
    self.deleteFriend(result)
    return result
def deleteFriend(self, uid):
    """Delete one friend or many.

    :param uid: a single user id (int) or an iterable of user ids
    """
    # isinstance instead of type(...) == int: also matches int subclasses
    if isinstance(uid, int):
        self.api.friends.delete(user_id=uid)
    else:
        # batch the deletions through the delayed manager in one pass
        with self.api.delayed() as dm:
            for user_id in uid:
                dm.friends.delete(user_id=user_id)
def setOnline(self):
    """Mark this account as online via the account API."""
    self.api.account.setOnline()
def getUserId(self, domain, is_conf=False):
    """Resolve a user id, conf peer id or group id from a link/screen name.

    :param is_conf: treat a bare number as a conf id instead of a user id
    :returns: an int peer id, or None when nothing could be resolved
    """
    domain = str(domain).lower().rstrip().rstrip('}').rstrip()
    # conf links come in several shapes: sel=c123, c123, chat=123, peer=2000000123
    for pattern in ('sel=c(\\d+)', '^c(\\d+)$', 'chat=(\\d+)', 'peer=2(\\d{9})'):
        match = re.search(pattern, domain)
        if match is not None:
            return int(match.group(1)) + CONF_START
    if is_conf:
        return int(domain) + CONF_START if domain.isdigit() else None
    # strip query-string / path prefixes, keep the last component
    if '=' in domain:
        domain = domain.split('=')[-1]
    if '/' in domain:
        domain = domain.split('/')[-1]
    if domain.startswith('-') and domain[1:].isdigit():
        return int(domain)  # negative ids denote groups
    data = self.api.users.get(user_ids=domain)
    return data[0]['id'] if data else None
def deleteComment(self, rep):
    """Delete the comment described by notification `rep` from our page."""
    feedback_id = rep['feedback']['id']
    kind = rep['type']
    if kind == 'wall':
        self.api.wall.delete(owner_id=self.self_id, post_id=feedback_id)
    elif kind.endswith('photo'):
        self.api.photos.deleteComment(owner_id=self.self_id, comment_id=feedback_id)
    elif kind.endswith('video'):
        self.api.video.deleteComment(owner_id=self.self_id, comment_id=feedback_id)
    else:
        self.api.wall.deleteComment(owner_id=self.self_id, comment_id=feedback_id)
def filterComments(self, test):
    """Scan fresh notifications and delete unwanted comments on our content.

    Deletes comments from blacklisted users, comments containing vk.com
    links, comments failing `test`, and comments carrying video/link
    attachments.  Blacklisted authors are re-banned.
    :param test: callable(str) -> truthy when the comment text is bad
    :returns: set of user ids whose comment failed `test`
    """
    data = self.api.notifications.get(start_time=self.last_viewed_comment + 1, count=100)['items']
    to_del = set()
    to_bl = set()
    self.loadUsers(data, lambda x: x['feedback']['from_id'], True)
    for rep in data:
        # 'date' can apparently be the literal 'i' for some items - TODO confirm
        if rep['date'] != 'i':
            self.last_viewed_comment = max(self.last_viewed_comment, int(rep['date']))
        def _check(s):
            # True when the commented object belongs to this account
            if 'photo' in s:
                return s['photo']['owner_id'] == self.self_id
            if 'video' in s:
                return s['video']['owner_id'] == self.self_id
            if 'post' in s:
                return s['post']['to_id'] == self.self_id
        # replies under other people's posts are none of our business
        if rep['type'] == 'comment_post' and 'parent' in rep and rep['parent'].get('to_id', self.self_id) != self.self_id:
            continue
        if rep['type'].startswith('comment_') or (rep['type'].startswith('reply_comment') and _check(rep['parent'])) or rep['type'] == 'wall':
            txt = html.escape(rep['feedback']['text'])
            res = 'good'
            frid = int(rep['feedback']['from_id'])
            # NOTE(review): indexes ['blacklisted'] directly while other call
            # sites use .get() - assumes the key is always present here; verify
            if frid > 0 and self.users[frid]['blacklisted']:
                res = 'blacklisted'
                log.write('comments', self.loggableName(frid) + ' (blacklisted): ' + txt)
                self.deleteComment(rep)
                to_bl.add(frid)
            elif 'vk.com/' in txt:
                res = 'link'
                log.write('comments', self.loggableName(frid) + ' (link): ' + txt)
                self.deleteComment(rep)
            elif test(txt):
                res = 'bad'
                log.write('comments', (self.loggableName(frid) if frid > 0 else str(frid)) + ': ' + txt)
                self.deleteComment(rep)
                if frid > 0:
                    to_del.add(frid)
            elif 'attachments' in rep['feedback'] and any(i.get('type') in ['video', 'link'] for i in rep['feedback']['attachments']):
                res = 'attachment'
                log.write('comments', self.loggableName(frid) + ' (attachment)')
                self.deleteComment(rep)
            self.logSender('Comment {} (by %sender%) - {}'.format(txt, res), PeerInfo(frid))
    stats.update('last_comment', self.last_viewed_comment)
    for i in to_bl:
        self.blacklist(i)
    return to_del
def setRelation(self, uid, set_by=None):
    """Update the "relationship partner" profile field.

    A falsy uid means the partner removed us; fall back to the default
    partner from vars.
    :param set_by: who triggered the removal (used for logging only)
    """
    if not uid:
        log.write('relation', self.loggableName(set_by) + ' (removed)')
        uid = self.vars['default_bf']
    else:
        log.write('relation', self.loggableName(uid))
    self.api.account.saveProfileInfo(relation_partner_id=uid)
    self.vars['bf'] = self.users[uid]
    self.logSender('Set relationship with %sender%', PeerInfo(uid))
def waitAllThreads(self, loop_thread, reply):
    """Graceful shutdown: drain pending longpoll events, stop workers and
    persist message state to msgdump.json for the next run."""
    lp = self.api.longpoll.copy()  # snapshot before the receiver stops updating it
    self.receiver.terminate_monitor = True
    loop_thread.join(60)
    # answer whatever is still queued so nothing is lost on restart
    while not self.receiver.longpoll_queue.empty():
        self.replyAll(reply)
    self.tm.shutdown(60)
    with open(accounts.getFile('msgdump.json'), 'w') as f:
        json.dump({'cache': self.last_message.dump(), 'longpoll': lp, 'tracker': self.tracker.times, 'lmid': self.receiver.last_message_id}, f)
# {name} - first_name last_name
# {id} - id
def printableName(self, pid, user_fmt, conf_fmt='Conf "{name}" ({id})'):
    """Format peer `pid` with user_fmt / conf_fmt ({id}, {name} fields)."""
    if pid > CONF_START:
        cid = pid - CONF_START
        return conf_fmt.format(id=cid, name=self.confs[cid]['title'])
    if pid > 0:
        user = self.users[pid]
        return user_fmt.format(id=pid, name=user['first_name'] + ' ' + user['last_name'])
    return 'Group ' + str(-pid)
def logSender(self, text, message: PeerInfo, short=False):
    """Log `text` with %sender% expanded, in plain and HTML ('db') forms."""
    plain = self.printableSender(message, False, short=short)
    rich = self.printableSender(message, True, short=short)
    logging.info(text.replace('%sender%', plain),
                 extra={'db': html.escape(text).replace('%sender%', rich)})
def printableSender(self, peer: PeerInfo, need_html, short=False):
    """Render `peer` for logs: conf title plus (optionally linked) user name.

    :param need_html: when True, wrap the user name in an <a href> link
    :param short: chat peers render as just 'conf "title" (id)'
    """
    if peer.is_chat:
        if short:
            return self.printableName(peer.peer_id, '', 'conf "{name}" ({id})')
        # %i / %c are private placeholders substituted AFTER printableName so
        # that str.format never sees braces coming from the conf title
        if need_html:
            res = self.printableName(peer.user_id, user_fmt='Conf "%c" (%i), <a href="https://vk.com/id{id}" '
                                                            'target="_blank">{name}</a>')
            return res.replace('%i', str(peer.chat_id)).replace('%c', html.escape(
                self.confs[peer.chat_id]['title']))
        else:
            res = self.printableName(peer.user_id, user_fmt='Conf "%c" (%i), {name}')
            return res.replace('%i', str(peer.chat_id)).replace('%c', html.escape(
                self.confs[peer.chat_id]['title']))
    else:
        if need_html:
            return self.printableName(peer.user_id,
                                      user_fmt='<a href="https://vk.com/id{id}" target="_blank">{name}</a>')
        else:
            return self.printableName(peer.user_id, user_fmt='{name}')
def loggableName(self, uid):
    """Return '<id> (<First Last>)' - the form used in log files."""
    return self.printableName(uid, '{id} ({name})')
def loggableConf(self, cid):
    """Return 'conf (<id>) `<title>`'; backticks in the title are replaced
    because they delimit the name in logs."""
    return 'conf ({}) `{}`'.format(cid, self.confs[cid]['title'].replace('`', "'"))
def loggableGroup(self, gid, name):
    """Return 'group (<id>) `<name>`' for log files (backticks replaced)."""
    return 'group ({}) `{}`'.format(gid, name.replace('`', "'"))
def blacklist(self, uid):
    """Ban user `uid` on behalf of this account."""
    self.api.account.banUser(user_id=uid)
def blacklistedCount(self):
    """Return how many users this account has banned."""
    return self.api.account.getBanned(count=0)['count']
def lastDialogs(self):
    """Fetch recent dialogs with per-peer message counts and conf metadata.

    :returns: (total_dialog_count,
               [(peer_id, message_count)],
               {peer_id: conf_title_or_empty},
               {conf_peer_id: [inviter_id, inviter_name, inviter_is_female]})
              or (None, None, None, None) when the API response is unusable.
    """
    def cb(req, resp):
        # delayed-call callback: collect (peer, message count) pairs
        if resp:
            d.append((req['peer_id'], resp['count']))
    dialogs = self.api.messages.getConversations(count=self.stats_dialog_count)
    d = []
    confs = {}
    try:
        items = list(dialogs['items'])
        with self.api.delayed() as dm:
            for dialog in items:
                peer = dialog['conversation']['peer']['id']
                if storage.contains('banned', peer):
                    continue
                # count=0 still returns the total number of messages
                dm.messages.getHistory(peer_id=peer, count=0).set_callback(cb)
                if 'chat_settings' in dialog['conversation']:
                    confs[peer] = dialog['conversation']['chat_settings']['title']
                else:
                    confs[peer] = ''
        self.confs.load([i - CONF_START for i in confs])
        invited = {}
        for i in confs:
            if self.confs[i - CONF_START] and self.confs[i - CONF_START].get('invited_by'):
                invited[i] = self.confs[i - CONF_START]['invited_by']
        self.users.load(invited.values())
        for i in invited.copy():
            # sex == 1 means female in the VK API
            invited[i] = [invited[i], self.printableName(invited[i], '{name}'), self.users[invited[i]]['sex'] == 1]
    except TypeError:
        logging.warning('Unable to fetch dialogs')
        return (None, None, None, None)
    return (dialogs['count'], d, confs, invited)
def clearCache(self):
    """Drop all cached user and conf records."""
    self.users.clear()
    self.confs.clear()
| |
#-------------------------------------------------------------------------------
# Name: opan_utils_base
# Purpose: Test objects for opan.utils.base
#
# Author: Brian Skinn
# bskinn@alum.mit.edu
#
# Created: 28 Feb 2016
# Copyright: (c) Brian Skinn 2016
# License: The MIT License; see "license.txt" for full license terms
# and contributor agreement.
#
# This file is part of opan (Open Anharmonic), a system for automated
# computation of anharmonic properties of molecular systems via wrapper
# calls to computational/quantum chemical software packages.
#
# http://www.github.com/bskinn/opan
#
#-------------------------------------------------------------------------------
import unittest
class TestOpanUtilsBaseMisc(unittest.TestCase):
    """Tests for misc helpers in opan.utils: pack_tups, safe_cast,
    make_timestamp and delta_fxn."""

    def test_Utils_PackTupsGoodPacking(self):
        from opan.utils import pack_tups
        tups = pack_tups(range(3), range(3,6), range(6,9))
        # was a nested list comprehension used purely for its side effects;
        # plain loops state the intent (and the failure location) clearly
        for j in range(3):
            for i in range(3):
                self.assertEqual(tups[i][j], 3*j + i)

    def test_Utils_PackTupsStrNoIter(self):
        from opan.utils import pack_tups
        # strings must be treated as scalars, not iterated character-wise
        tups = pack_tups("ab", range(2))
        self.assertEqual(tups[0][0], "ab")
        self.assertEqual(tups[1][0], "ab")

    def test_Utils_PackTupsErrIfDiffLens(self):
        from opan.utils import pack_tups
        self.assertRaises(ValueError, pack_tups, range(2), range(3))

    def test_Utils_PackTupsTestAllSingletons(self):
        from opan.utils import pack_tups
        tups = pack_tups(0,1,2,3,4)
        self.assertEqual(len(tups), 1)
        self.assertTupleEqual(tups[0], tuple(range(5)))

    def test_Utils_SafeCastNumpyArray(self):
        import numpy as np
        from opan.utils import safe_cast as scast
        # an ndarray cannot be safely cast to a scalar float
        a = np.array(range(5))
        self.assertRaises(TypeError, scast, a, np.float_)

    def test_Utils_SafeCastIntToFloat(self):
        from opan.utils import safe_cast as scast
        v = scast(1, float)
        self.assertIsInstance(v, float)

    def test_Utils_MakeTimeStampSecs(self):
        from opan.utils import make_timestamp as mt
        self.assertEqual(mt(5), "0h 0m 5s")

    def test_Utils_MakeTimeStampMins(self):
        from opan.utils import make_timestamp as mt
        self.assertEqual(mt(500), "0h 8m 20s")

    def test_Utils_MakeTimeStampHours(self):
        from opan.utils import make_timestamp as mt
        self.assertEqual(mt(20000), "5h 33m 20s")

    def test_Utils_MakeTimeStampLongHours(self):
        from opan.utils import make_timestamp as mt
        # hours are not wrapped at 24 - long durations keep accumulating
        self.assertEqual(mt(200000), "55h 33m 20s")

    def test_Utils_DeltaFxn(self):
        from opan.utils import delta_fxn as dfx
        # Kronecker delta: 1 when the arguments match, else 0
        self.assertEqual(dfx(1,1), 1)
        self.assertEqual(dfx(1,2), 0)
class TestOpanUtilsBaseCheckGeom(unittest.TestCase):
    """Tests for opan.utils.check_geom (geometry/atom-list comparison)."""
    import numpy as np
    from opan.const import EnumCheckGeomMismatch as ECGM
    # shared fixture: four atoms (H-O-O-H) with a 12-element coordinate vector
    coords = np.array(range(12))
    atoms = ['H', 'O', 'O', 'H']
    def test_Utils_CheckGeom_GoodCheck(self):
        from opan.utils import check_geom as cg
        from opan.const import atom_num
        # check_geom returns a tuple; success or not is the first element
        self.assertTrue(cg(self.coords, self.atoms,
                    self.coords, self.atoms)[0])
        # Also check if the atoms are swapped with atomic numbers
        self.assertTrue(cg(self.coords, [atom_num[s] for s in self.atoms],
                    self.coords, self.atoms)[0])
    def test_Utils_CheckGeom_ArgsNotVectors(self):
        # every positional geometry/atom argument must be one-dimensional
        from opan.utils import check_geom as cg
        import numpy as np
        self.assertRaises(ValueError, cg,
                    self.coords.reshape((3,4)), self.atoms,
                    self.coords, self.atoms)
        self.assertRaises(ValueError, cg,
                    self.coords, np.array(self.atoms).reshape((2,2)),
                    self.coords, self.atoms)
        self.assertRaises(ValueError, cg,
                    self.coords, self.atoms,
                    self.coords.reshape((3, 4)), self.atoms)
        self.assertRaises(ValueError, cg,
                    self.coords, self.atoms,
                    self.coords, np.array(self.atoms).reshape((2, 2)))
    def test_Utils_CheckGeom_CoordAtomSizeMismatch(self):
        # 3 * len(atoms) must equal len(coords) for each geometry
        from opan.utils import check_geom as cg
        self.assertRaises(ValueError, cg,
                    self.coords, self.atoms,
                    self.coords[:-1], self.atoms)
        self.assertRaises(ValueError, cg,
                    self.coords[:-1], self.atoms,
                    self.coords, self.atoms)
    def test_Utils_CheckGeom_GeomSizeMismatch(self):
        # differently-sized geometries: fail with DIMENSION and no mismatch map
        from opan.utils import check_geom as cg
        tup = cg(self.coords, self.atoms, self.coords[:9], self.atoms[:3])
        self.assertFalse(tup[0])
        self.assertEqual(tup[1], self.ECGM.DIMENSION)
        self.assertIsNone(tup[2])
    def test_Utils_CheckGeom_CoordMismatch(self):
        from opan.utils import check_geom as cg
        # Change one of the coordinates to achieve mismatch
        coord_mod = self.coords.copy()
        coord_mod[0] = -12
        # Store the call result and check its contents
        tup = cg(coord_mod, self.atoms, self.coords, self.atoms)
        self.assertFalse(tup[0])
        self.assertEqual(tup[1], self.ECGM.COORDS)
        # tup[2] flags each coordinate: only the modified one is False
        self.assertFalse(tup[2][0])
        self.assertTrue(all(tup[2][1:]))
    def test_Utils_CheckGeom_AtomMismatch(self):
        from opan.utils import check_geom as cg
        # Change one of the atoms to achieve mismatch
        atoms_mod = self.atoms.copy()
        atoms_mod[2] = 'C'
        # Store the call result and check its contents
        tup = cg(self.coords, atoms_mod, self.coords, self.atoms)
        self.assertFalse(tup[0])
        self.assertEqual(tup[1], self.ECGM.ATOMS)
        # tup[2] flags each atom: only index 2 is False
        self.assertFalse(tup[2][2])
        self.assertTrue(all(tup[2][:2]))
        self.assertTrue(tup[2][3])
class TestOpanUtilsBaseTemplateSubst(unittest.TestCase):
    """Tests for opan.utils.template_subst (tag substitution in templates)."""
    from textwrap import dedent
    # template containing <NOUN> and <NUM> tags ...
    template = dedent("""\
        This <NOUN> is a two-line string.
        It has <NUM> tags (maybe).
        """)
    # ... and its expected expansion for the mapping below
    subst_widget_four = dedent("""\
        This widget is a two-line string.
        It has four tags (maybe).
        """)
    dict_widget_four = {'NOUN' : 'widget',
                        'NUM' : 'four'}
    def test_Utils_TemplateSubst_GoodSubst(self):
        from opan.utils import template_subst as ts
        self.assertEqual(self.subst_widget_four,
                         ts(self.template, self.dict_widget_four))
class TestOpanUtilsBaseAssertNPFArray(unittest.TestCase):
    """Tests for opan.utils.assert_npfloatarray (ndarray type assertions)."""
    from opan.error import OpanError
    # class-level import makes assertErrorAndTypecode usable as a bound helper
    from opan.test.utils import assertErrorAndTypecode
    class TestError(OpanError):
        # typecodes expected from the assertion helper
        NONE = 'NONE'
        NOT_FLOAT = 'NOT_FLOAT'
        NOT_ARRAY = 'NOT_ARRAY'
        NO_MEMBER = 'NO_MEMBER'
    def test_Utils_AssertNPFArray_TestObjIs1DFloatArray(self):
        import numpy as np
        from opan.utils import assert_npfloatarray as a_npfa
        try:
            a_npfa(np.float_(np.array(range(5))), None, "1D Float array",
                   self.TestError, self.TestError.NONE,
                   "No actual error; this should never be raised")
        except Exception:
            self.fail("Assertion failed on valid 1-D ndarray")
    def test_Utils_AssertNPFArray_TestObjIs2DFloatArray(self):
        import numpy as np
        from opan.utils import assert_npfloatarray as a_npfa
        try:
            a_npfa(np.float_(np.array(range(6)).reshape((2,3))),
                   None, "1D float array",
                   self.TestError, self.TestError.NONE,
                   "No actual error; this should never be raised")
        except Exception:
            self.fail("Assertion failed on valid 2-D ndarray")
    def test_Utils_AssertNPFArray_TestObjArrayNotFloat(self):
        import numpy as np
        from opan.utils import assert_npfloatarray as a_npfa
        # an integer ndarray must raise with the NOT_FLOAT typecode
        self.assertErrorAndTypecode(self.TestError, a_npfa,
                                    self.TestError.NOT_FLOAT,
                                    np.array(range(5)), None, "1D int array",
                                    self.TestError, self.TestError.NOT_FLOAT,
                                    "ASSERTION FAIL: Non-float array")
    def test_Utils_AssertNPFArray_TestObjNotArray(self):
        import numpy as np
        from opan.utils import assert_npfloatarray as a_npfa
        # NumPy numeric type, but not an array
        self.assertErrorAndTypecode(self.TestError, a_npfa,
                                    self.TestError.NOT_ARRAY,
                                    np.float_(5.5), None, "Bare float",
                                    self.TestError, self.TestError.NOT_ARRAY,
                                    "ASSERTION FAIL: Non-array")
        # Non-ndarray type
        self.assertErrorAndTypecode(self.TestError, a_npfa,
                                    self.TestError.NOT_ARRAY,
                                    self.TestError, None, "Bare float",
                                    self.TestError, self.TestError.NOT_ARRAY,
                                    "ASSERTION FAIL: Non-array")
    def test_Utils_AssertNPFArray_TestVarIsFloatArray(self):
        import numpy as np
        from opan.utils import assert_npfloatarray as a_npfa
        # second argument names a member to look up on the first argument
        class TestClass(object):
            import numpy as np
            testvar = np.float_(np.array(range(5)))
        try:
            a_npfa(TestClass, 'testvar', "Array member",
                   self.TestError, self.TestError.NONE,
                   "No actual error; this should never be raised")
        except Exception:
            self.fail("Assertion failed on a valid ndarray member")
    def test_Utils_AssertNPFArray_TestVarNotPresent(self):
        import numpy as np
        from opan.utils import assert_npfloatarray as a_npfa
        # a missing member must raise with the NO_MEMBER typecode
        self.assertErrorAndTypecode(self.TestError, a_npfa,
                                    self.TestError.NO_MEMBER,
                                    self.TestError, 'not_there', "Absent member",
                                    self.TestError, self.TestError.NO_MEMBER,
                                    "ASSERTION FAIL: Member not found")
def suite():
    """Build a TestSuite aggregating every test class in this module."""
    loader = unittest.TestLoader()
    cases = (TestOpanUtilsBaseMisc,
             TestOpanUtilsBaseCheckGeom,
             TestOpanUtilsBaseTemplateSubst,
             TestOpanUtilsBaseAssertNPFArray)
    s = unittest.TestSuite()
    for case in cases:
        s.addTest(loader.loadTestsFromTestCase(case))
    return s
if __name__ == '__main__': # pragma: no cover
    # running the test module directly is unsupported; use suite() instead
    print("Module not executable.")
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'valerio cosentino'
import mysql.connector
class DbUtil():
"""
This class provides database utilities
"""
def get_connection(self, config):
"""
gets DB connection
:type config: dict
:param config: the DB configuration file
"""
return mysql.connector.connect(**config)
def close_connection(self, cnx):
"""
closes DB connection
:type cnx: Object
:param cnx: DB connection to close
"""
cnx.close()
def lowercase(self, _str):
"""
conver str to lowercase
:type _str: str
:param _str: str to convert
"""
if _str:
_str = _str.lower()
return _str
def select_project_id(self, cnx, project_name, logger):
"""
gets project id
:type cnx: Object
:param cnx: DB connection
:type project_name: str
:param project_name: name of the project
:type logger: Object
:param logger: logger
"""
found = None
cursor = cnx.cursor()
query = "SELECT p.id " \
"FROM project p " \
"WHERE p.name = %s"
arguments = [project_name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.error("the project " + str(project_name) + " does not exist")
cursor.close()
return found
def insert_project(self, cnx, db_name, project_name):
"""
inserts a project in the DB
:type cnx: Object
:param cnx: DB connection
:type db_name: str
:param db_name: the name of an existing DB
:type project_name: str
:param project_name: the name of the project to create
"""
self.set_database(cnx, db_name)
cursor = cnx.cursor()
query = "INSERT IGNORE INTO project " \
"VALUES (%s, %s)"
arguments = [None, project_name]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
def insert_repo(self, cnx, project_id, repo_name, logger):
"""
inserts repository
:type cnx: Object
:param cnx: DB connection
:type project_id: int
:param project_id: id of the project
:type repo_name: str
:param repo_name: name of the repository
:type logger: Object
:param logger: logger
"""
cursor = cnx.cursor()
query = "INSERT IGNORE INTO repository " \
"VALUES (%s, %s, %s)"
arguments = [None, project_id, repo_name]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
def insert_issue_tracker(self, cnx, repo_id, issue_tracker_name, issue_type, logger):
"""
inserts issue tracker
:type cnx: Object
:param cnx: DB connection
:type repo_id: int
:param repo_id: id of the repository
:type issue_tracker_name: str
:param issue_tracker_name: name of the issue tracker
:type issue_type: str
:param issue_type: type of the issue tracker
:type logger: Object
:param logger: logger
"""
cursor = cnx.cursor()
query = "INSERT IGNORE INTO issue_tracker " \
"VALUES (%s, %s, %s, %s)"
arguments = [None, repo_id, issue_tracker_name, issue_type]
cursor.execute(query, arguments)
cnx.commit()
query = "SELECT id " \
"FROM issue_tracker " \
"WHERE name = %s"
arguments = [issue_tracker_name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.warning("no issue with name " + str(issue_tracker_name))
cursor.close()
return found
def select_label_id(self, cnx, name, logger):
"""
selects the label id by its name
:type cnx: Object
:param cnx: DB connection
:type name: str
:param name: the name of the label
:type logger: Object
:param logger: logger
"""
cursor = cnx.cursor()
query = "SELECT id FROM label WHERE name = %s"
arguments = [name]
cursor.execute(query, arguments)
row = cursor.fetchone()
found = None
if row:
found = row[0]
else:
logger.warning("no label with name " + str(name))
cursor.close()
return found
def insert_label(self, cnx, name, logger):
"""
inserts a label
:type cnx: Object
:param cnx: DB connection
:type name: str
:param name: the name of the label
:type logger: Object
:param logger: logger
"""
cursor = cnx.cursor()
query = "INSERT IGNORE INTO label " \
"VALUES (%s, %s)"
arguments = [None, name]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
def select_repo_id(self, cnx, repo_name, logger):
"""
selects repository id
:type cnx: Object
:param cnx: DB connection
:type repo_name: str
:param repo_name: name of the repository
:type logger: Object
:param logger: logger
"""
found = None
cursor = cnx.cursor()
query = "SELECT id " \
"FROM repository " \
"WHERE name = %s"
arguments = [repo_name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.error("the repository " + repo_name + " does not exist")
cursor.close()
return found
def select_instant_messaging_id(self, cnx, im_name, logger):
"""
selects instant messaging id
:type cnx: Object
:param cnx: DB connection
:type im_name: str
:param im_name: name of the instant messaging
:type logger: Object
:param logger: logger
"""
found = None
cursor = cnx.cursor()
query = "SELECT id " \
"FROM instant_messaging " \
"WHERE name = %s"
arguments = [im_name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.error("the instant messaging " + im_name + " does not exist")
cursor.close()
return found
def insert_user(self, cnx, name, email, logger):
"""
inserts user
:type cnx: Object
:param cnx: DB connection
:type name: str
:param name: name of the user
:type email: str
:param email: email of the user
:type logger: Object
:param logger: logger
"""
cursor = cnx.cursor()
query = "INSERT IGNORE INTO user " \
"VALUES (%s, %s, %s)"
arguments = [None, name, email]
cursor.execute(query, arguments)
cnx.commit()
cursor.close()
def select_user_id_by_email(self, cnx, email, logger):
"""
selects user id by email
:type cnx: Object
:param cnx: DB connection
:type email: str
:param email: email of the user
:type logger: Object
:param logger: logger
"""
found = None
if email:
cursor = cnx.cursor()
query = "SELECT id " \
"FROM user " \
"WHERE email = %s"
arguments = [email]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.debug("there is not user with this email " + email)
cursor.close()
return found
def select_user_id_by_name(self, cnx, name, logger):
"""
selects user id by name
:type cnx: Object
:param cnx: DB connection
:type name: str
:param name: name of the user
:type logger: Object
:param logger: logger
"""
found = None
if name:
found = None
cursor = cnx.cursor()
query = "SELECT id " \
"FROM user " \
"WHERE name = %s"
arguments = [name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.debug("there is not user with this name " + name)
cursor.close()
return found
def select_forum_id(self, cnx, forum_name, logger):
"""
selects forum id
:type cnx: Object
:param cnx: DB connection
:type forum_name: str
:param forum_name: name of the forum
:type logger: Object
:param logger: logger
"""
found = None
cursor = cnx.cursor()
query = "SELECT id " \
"FROM forum " \
"WHERE name = %s"
arguments = [forum_name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.error("the forum " + forum_name + " does not exist")
cursor.close()
return found
def select_issue_tracker_id(self, cnx, issue_tracker_name, logger):
"""
selects issue tracker id
:type cnx: Object
:param cnx: DB connection
:type issue_tracker_name: str
:param issue_tracker_name: name of the issue tracker
:type logger: Object
:param logger: logger
"""
found = None
cursor = cnx.cursor()
query = "SELECT id " \
"FROM issue_tracker " \
"WHERE name = %s"
arguments = [issue_tracker_name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
else:
logger.error("the issue tracker " + issue_tracker_name + " does not exist")
cursor.close()
return found
def get_issue_dependency_type_id(self, cnx, name):
"""
selects issue dependency type id
:type cnx: Object
:param cnx: DB connection
:type name: str
:param name: dependency type name
"""
found = None
cursor = cnx.cursor()
query = "SELECT id FROM issue_dependency_type WHERE name = %s"
arguments = [name]
cursor.execute(query, arguments)
row = cursor.fetchone()
cursor.close()
if row:
found = row[0]
return found
def get_message_type_id(self, cnx, name):
"""
selects message type id
:type cnx: Object
:param cnx: DB connection
:type name: str
:param name: message type name
"""
found = None
cursor = cnx.cursor()
query = "SELECT id FROM message_type WHERE name = %s"
arguments = [name]
cursor.execute(query, arguments)
row = cursor.fetchone()
if row:
found = row[0]
cursor.close()
return found
    def set_database(self, cnx, db_name):
        """
        set database

        :type cnx: Object
        :param cnx: DB connection

        :type db_name: str
        :param db_name: name of the database
        """
        cursor = cnx.cursor()
        # NOTE(review): the database name is concatenated directly into the
        # statement -- identifiers cannot be bound as query parameters in
        # MySQL, so db_name must come from trusted configuration, never from
        # user input.
        use_database = "USE " + db_name
        cursor.execute(use_database)
        cursor.close()
def set_settings(self, cnx):
"""
set database settings
:type cnx: Object
:param cnx: DB connection
"""
cursor = cnx.cursor()
cursor.execute("set global innodb_file_format = BARRACUDA")
cursor.execute("set global innodb_file_format_max = BARRACUDA")
cursor.execute("set global innodb_large_prefix = ON")
cursor.execute("set global character_set_server = utf8")
cursor.execute("set global max_connections = 500")
cursor.close()
def restart_connection(self, config, logger):
"""
restart DB connection
:type config: dict
:param config: the DB configuration file
:type logger: Object
:param logger: logger
"""
logger.info("restarting connection...")
return mysql.connector.connect(**config)
| |
# Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
ZFS Storage Appliance REST API Client Programmatic Interface
TODO(diemtran): this module needs to be placed in a library common to OpenStack
services. When this happens, the file should be removed from Manila code
base and imported from the relevant library.
"""
import time
from oslo_serialization import jsonutils
import six
from six.moves import http_client
# pylint: disable=E0611,F0401
from six.moves.urllib import error as urlerror
from six.moves.urllib import request as urlrequest
def log_debug_msg(obj, message):
    """Forward *message* to obj's log function when one is configured."""
    log_fn = obj.log_function
    if log_fn:
        log_fn(message)
class Status(object):
    """Result HTTP Status codes used by the ZFSSA REST client."""
    #: Request return OK
    OK = http_client.OK  # pylint: disable=invalid-name
    #: New resource created successfully
    CREATED = http_client.CREATED
    #: Command accepted
    ACCEPTED = http_client.ACCEPTED
    #: Command returned OK but no data will be returned
    NO_CONTENT = http_client.NO_CONTENT
    #: Bad Request
    BAD_REQUEST = http_client.BAD_REQUEST
    #: User is not authorized
    UNAUTHORIZED = http_client.UNAUTHORIZED
    #: The request is forbidden for the authenticated user
    FORBIDDEN = http_client.FORBIDDEN
    #: The requested resource was not found
    NOT_FOUND = http_client.NOT_FOUND
    #: The HTTP method is not allowed on this resource
    NOT_ALLOWED = http_client.METHOD_NOT_ALLOWED
    #: Request timed out
    TIMEOUT = http_client.REQUEST_TIMEOUT
    #: The request conflicts with the current state of the resource
    CONFLICT = http_client.CONFLICT
    #: Service Unavailable
    BUSY = http_client.SERVICE_UNAVAILABLE
class RestResult(object):
    """Result from a REST API operation."""
    def __init__(self, logfunc=None, response=None, err=None):
        """Initialize a RestResult containing the results from a REST call.

        :param logfunc: debug log function.
        :param response: HTTP response.
        :param err: HTTP error.
        """
        self.response = response
        self.log_function = logfunc
        self.error = err
        self.data = ""
        self.status = 0
        if self.response:
            self.status = self.response.getcode()
            # Drain the response body completely; read() may deliver it in
            # several chunks.
            # NOTE(review): on Python 3, read() returns bytes while self.data
            # starts as str -- confirm this path only runs under Python 2
            # semantics (six is used throughout this module).
            result = self.response.read()
            while result:
                self.data += result
                result = self.response.read()
        if self.error:
            # HTTPError carries its own code; use the canonical reason
            # phrase as the payload.
            self.status = self.error.code
            self.data = http_client.responses[self.status]
        log_debug_msg(self, 'Response code: %s' % self.status)
        log_debug_msg(self, 'Response data: %s' % self.data)
    def get_header(self, name):
        """Get an HTTP header with the given name from the results.

        :param name: HTTP header name.
        :return: The header value or None if no value is found.
        """
        if self.response is None:
            return None
        info = self.response.info()
        # NOTE(review): getheader() exists on Python 2's mimetools.Message;
        # Python 3's email.message.Message uses .get() -- verify the
        # supported interpreter versions before relying on this.
        return info.getheader(name)
class RestClientError(Exception):
    """Exception for ZFS REST API client errors."""

    def __init__(self, status, name="ERR_INTERNAL", message=None):
        """Create a REST Response exception.

        :param status: HTTP response status.
        :param name: The name of the REST API error type.
        :param message: Descriptive error message returned from REST call.
        """
        super(RestClientError, self).__init__(message)
        self.code = status
        self.name = name
        # For well-known HTTP status codes, prefer the canonical reason
        # phrase over the caller-supplied message.
        self.msg = http_client.responses.get(status, message)

    def __str__(self):
        return "%d %s %s" % (self.code, self.name, self.msg)
class RestClientURL(object):  # pylint: disable=R0902
    """ZFSSA urllib client."""
    def __init__(self, url, logfunc=None, **kwargs):
        """Initialize a REST client.

        :param url: The ZFSSA REST API URL.
        :key session: HTTP Cookie value of x-auth-session obtained from a
                      normal BUI login.
        :key timeout: Time in seconds to wait for command to complete.
            (Default is 60 seconds).
        """
        self.url = url
        self.log_function = logfunc
        self.local = kwargs.get("local", False)
        self.base_path = kwargs.get("base_path", "/api")
        self.timeout = kwargs.get("timeout", 60)
        # Fix: build the headers dict before storing the session cookie.
        # Previously self.headers was None when the session was assigned
        # (raising TypeError), and the later dict assignment clobbered any
        # session header anyway.
        self.headers = {"content-type": "application/json"}
        if kwargs.get('session'):
            self.headers['x-auth-session'] = kwargs.get('session')
        self.do_logout = False
        self.auth_str = None
    def _path(self, path, base_path=None):
        """Build rest url path."""
        if path.startswith("http://") or path.startswith("https://"):
            return path
        if base_path is None:
            base_path = self.base_path
        if not path.startswith(base_path) and not (
                self.local and ("/api" + path).startswith(base_path)):
            path = "%s%s" % (base_path, path)
        if self.local and path.startswith("/api"):
            path = path[4:]
        return self.url + path
    def _authorize(self):
        """Performs authorization setting x-auth-session."""
        self.headers['authorization'] = 'Basic %s' % self.auth_str
        if 'x-auth-session' in self.headers:
            del self.headers['x-auth-session']
        try:
            result = self.post("/access/v1")
            del self.headers['authorization']
            if result.status == http_client.CREATED:
                self.headers['x-auth-session'] = (
                    result.get_header('x-auth-session'))
                self.do_logout = True
                log_debug_msg(self, ('ZFSSA version: %s')
                              % result.get_header('x-zfssa-version'))
            elif result.status == http_client.NOT_FOUND:
                raise RestClientError(result.status, name="ERR_RESTError",
                                      message=("REST Not Available:"
                                               "Please Upgrade"))
        except RestClientError:
            # Make sure credentials never linger in the header set after a
            # failed authorization.
            del self.headers['authorization']
            raise
    def login(self, auth_str):
        """Login to an appliance using a user name and password.

        Start a session like what is done logging into the BUI. This is not a
        requirement to run REST commands, since the protocol is stateless.
        What is does is set up a cookie session so that some server side
        caching can be done. If login is used remember to call logout when
        finished.

        :param auth_str: Authorization string (base64).
        """
        self.auth_str = auth_str
        self._authorize()
    def logout(self):
        """Logout of an appliance."""
        result = None
        try:
            result = self.delete("/access/v1", base_path="/api")
        except RestClientError:
            pass
        self.headers.clear()
        self.do_logout = False
        return result
    def islogin(self):
        """return if client is login."""
        return self.do_logout
    @staticmethod
    def mkpath(*args, **kwargs):
        """Make a path?query string for making a REST request.

        :cmd_params args: The path part.
        :cmd_params kwargs: The query part.
        """
        buf = six.StringIO()
        query = "?"
        for arg in args:
            buf.write("/")
            buf.write(arg)
        for k in kwargs:
            buf.write(query)
            if query == "?":
                query = "&"
            buf.write(k)
            buf.write("=")
            buf.write(kwargs[k])
        return buf.getvalue()
    # pylint: disable=R0912
    def request(self, path, request, body=None, **kwargs):
        """Make an HTTP request and return the results.

        :param path: Path used with the initialized URL to make a request.
        :param request: HTTP request type (GET, POST, PUT, DELETE).
        :param body: HTTP body of request.
        :key accept: Set HTTP 'Accept' header with this value.
        :key base_path: Override the base_path for this request.
        :key content: Set HTTP 'Content-Type' header with this value.
        """
        out_hdrs = dict.copy(self.headers)
        if kwargs.get("accept"):
            out_hdrs['accept'] = kwargs.get("accept")
        if body:
            if isinstance(body, dict):
                body = six.text_type(jsonutils.dumps(body))
        if body and len(body):
            out_hdrs['content-length'] = len(body)
        zfssaurl = self._path(path, kwargs.get("base_path"))
        req = urlrequest.Request(zfssaurl, body, out_hdrs)
        req.get_method = lambda: request
        maxreqretries = kwargs.get("maxreqretries", 10)
        retry = 0
        response = None
        log_debug_msg(self, 'Request: %s %s' % (request, zfssaurl))
        log_debug_msg(self, 'Out headers: %s' % out_hdrs)
        if body and body != '':
            log_debug_msg(self, 'Body: %s' % body)
        while retry < maxreqretries:
            try:
                response = urlrequest.urlopen(req, timeout=self.timeout)
            except urlerror.HTTPError as err:
                if err.code == http_client.NOT_FOUND:
                    log_debug_msg(self, 'REST Not Found: %s' % err.code)
                else:
                    log_debug_msg(self, ('REST Not Available: %s') % err.code)
                # Service busy: back off one second and retry.
                if (err.code == http_client.SERVICE_UNAVAILABLE and
                        retry < maxreqretries):
                    retry += 1
                    time.sleep(1)
                    log_debug_msg(self, ('Server Busy retry request: %s')
                                  % retry)
                    continue
                # Session may have expired: re-authorize and retry, unless
                # the failing request is the authorization call itself.
                if ((err.code == http_client.UNAUTHORIZED or
                     err.code == http_client.INTERNAL_SERVER_ERROR) and
                        '/access/v1' not in zfssaurl):
                    try:
                        log_debug_msg(self, ('Authorizing request: '
                                             '%(zfssaurl)s'
                                             'retry: %(retry)d .')
                                      % {'zfssaurl': zfssaurl,
                                         'retry': retry})
                        self._authorize()
                        req.add_header('x-auth-session',
                                       self.headers['x-auth-session'])
                    except RestClientError:
                        log_debug_msg(self, ('Cannot authorize.'))
                    retry += 1
                    time.sleep(1)
                    continue
                return RestResult(self.log_function, err=err)
            except urlerror.URLError as err:
                log_debug_msg(self, ('URLError: %s') % err.reason)
                raise RestClientError(-1, name="ERR_URLError",
                                      message=err.reason)
            break
        if ((response and
             response.getcode() == http_client.SERVICE_UNAVAILABLE) and
                retry >= maxreqretries):
            raise RestClientError(response.getcode(), name="ERR_HTTPError",
                                  message="REST Not Available: Disabled")
        return RestResult(self.log_function, response=response)
    def get(self, path, **kwargs):
        """Make an HTTP GET request.

        :param path: Path to resource.
        """
        return self.request(path, "GET", **kwargs)
    def post(self, path, body="", **kwargs):
        """Make an HTTP POST request.

        :param path: Path to resource.
        :param body: Post data content.
        """
        return self.request(path, "POST", body, **kwargs)
    def put(self, path, body="", **kwargs):
        """Make an HTTP PUT request.

        :param path: Path to resource.
        :param body: Put data content.
        """
        return self.request(path, "PUT", body, **kwargs)
    def delete(self, path, **kwargs):
        """Make an HTTP DELETE request.

        :param path: Path to resource that will be deleted.
        """
        return self.request(path, "DELETE", **kwargs)
    def head(self, path, **kwargs):
        """Make an HTTP HEAD request.

        :param path: Path to resource.
        """
        return self.request(path, "HEAD", **kwargs)
| |
#!/usr/bin/env python
#
# This program returns the gray matter segmentation given anatomical, spinal cord segmentation and t2star images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Sara Dupont
# Modified: 2015-05-20
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sct_utils as sct
import os
import time
import sys
import getopt
from msct_parser import *
from msct_image import Image
from sct_asman import Model, Param, GMsegSupervisedMethod
from msct_gmseg_utils import *
class Pretreatments:
    """Prepare the t2star target and cord segmentation for gm segmentation.

    Copies the inputs into the current (tmp) working directory, resamples
    them to a 0.3x0.3 mm in-plane resolution when needed, records the
    original orientation and crops the t2star around the spinal cord.
    """
    def __init__(self, target_fname, sc_seg_fname, t2_data=None):
        """
        :param target_fname: file name of the t2star target image
        :param sc_seg_fname: file name of the target's spinal cord segmentation
        :param t2_data: optional (t2 image, t2 seg, t2 landmarks) triple used
            to compute the vertebral level file
        """
        self.t2star = 't2star.nii.gz'
        self.sc_seg = 't2star_sc_seg.nii.gz'
        self.t2 = 't2.nii.gz'
        self.t2_seg = 't2_seg.nii.gz'
        self.t2_landmarks = 't2_landmarks.nii.gz'
        # The inputs live in the parent directory; work on local copies.
        sct.run('cp ../' + target_fname + ' ./' + self.t2star)
        sct.run('cp ../' + sc_seg_fname + ' ./' + self.sc_seg)
        nx, ny, nz, nt, self.original_px, self.original_py, pz, pt = sct.get_dimension(self.t2star)
        # Resample to the model's 0.3x0.3 mm in-plane resolution if needed;
        # the original resolution is kept so results can be resampled back.
        if round(self.original_px, 2) != 0.3 or round(self.original_py, 2) != 0.3:
            self.t2star = resample_image(self.t2star)
            self.sc_seg = resample_image(self.sc_seg, binary=True)
        status, t2_star_orientation = sct.run('sct_orientation -i ' + self.t2star)
        # NOTE(review): assumes the sct_orientation output embeds the
        # 3-letter orientation code at characters [4:7] -- confirm against
        # the tool version in use.
        self.original_orientation = t2_star_orientation[4:7]
        self.square_mask = crop_t2_star(self.t2star, self.sc_seg, box_size=75)
        self.treated_target = self.t2star[:-7] + '_seg_in_croped.nii.gz'
        self.level_fname = None
        if t2_data is not None:
            sct.run('cp ../' + t2_data[0] + ' ./' + self.t2)
            sct.run('cp ../' + t2_data[1] + ' ./' + self.t2_seg)
            sct.run('cp ../' + t2_data[2] + ' ./' + self.t2_landmarks)
            self.level_fname = compute_level_file(self.t2star, self.sc_seg, self.t2, self.t2_seg, self.t2_landmarks)
class FullGmSegmentation:
    """Run the complete gray matter segmentation pipeline in a tmp directory.

    Builds (or reuses) the appearance model, pretreats the target, segments
    the gray matter, post-treats the results back into the original space
    and, when a reference segmentation is provided, computes Dice scores.
    """
    def __init__(self, target_fname, sc_seg_fname, t2_data, level_fname, ref_gm_seg=None, model=None, param=None):
        """
        :param target_fname: t2star target image file name
        :param sc_seg_fname: spinal cord segmentation of the target
        :param t2_data: optional t2 triple used to compute vertebral levels
        :param level_fname: optional level image (overrides t2_data)
        :param ref_gm_seg: optional manual gm segmentation for validation
        :param model: optional precomputed appearance model
        :param param: Param instance holding all pipeline options (required)
        """
        before = time.time()
        self.param = param
        sct.printv('\nBuilding the appearance model...', verbose=self.param.verbose, type='normal')
        if model is None:
            self.model = Model(model_param=self.param, k=0.8)
        else:
            self.model = model
        sct.printv('\n--> OK !', verbose=self.param.verbose, type='normal')
        self.target_fname = check_file_to_niigz(target_fname)
        self.sc_seg_fname = check_file_to_niigz(sc_seg_fname)
        self.t2_data = t2_data
        self.ref_gm_seg = ref_gm_seg
        # All intermediate files are produced inside a timestamped tmp dir.
        self.tmp_dir = 'tmp_' + sct.extract_fname(self.target_fname)[1] + '_' + time.strftime("%y%m%d%H%M%S")
        sct.run('mkdir ' + self.tmp_dir)
        os.chdir(self.tmp_dir)
        self.level_to_use = None
        if level_fname is not None:
            # An explicit level file supersedes the t2-derived levels.
            t2_data = None
            if check_file_to_niigz('../' + level_fname):
                sct.run('cp ../' + level_fname + ' .')
                level_fname = sct.extract_fname(level_fname)[1]+sct.extract_fname(level_fname)[2]
                sct.run('sct_orientation -i ' + level_fname + ' -s IRP')
                self.level_to_use = sct.extract_fname(level_fname)[1] + '_IRP.nii.gz'
            else:
                # level_fname is a plain string (single-slice level label).
                self.level_to_use = level_fname
        self.gm_seg = None
        self.res_names = {}
        self.dice_name = None
        self.segmentation_pipeline()
        os.chdir('..')
        after = time.time()
        sct.printv('Done! (in ' + str(after-before) + ' sec) \nTo see the result, type :')
        # Pick fslview colormaps/bounds matching the result type.
        if self.param.res_type == 'binary':
            wm_col = 'Red'
            gm_col = 'Blue'
            b = '0,1'
        else:
            wm_col = 'Blue-Lightblue'
            gm_col = 'Red-Yellow'
            b = '0.5,1'
        sct.printv('fslview ' + self.target_fname + ' -b 0,700 ' + self.res_names['wm_seg'] + ' -l ' + wm_col + ' -t 0.4 -b ' + b + ' ' + self.res_names['gm_seg'] + ' -l ' + gm_col + ' -t 0.4 -b ' + b + ' &', param.verbose, 'info')

    # ------------------------------------------------------------------------------------------------------------------
    def segmentation_pipeline(self):
        """Pretreat the target, segment the gm and post-treat the results."""
        sct.printv('\nDoing target pretreatments ...', verbose=self.param.verbose, type='normal')
        self.pretreat = Pretreatments(self.target_fname, self.sc_seg_fname, self.t2_data)
        if self.pretreat.level_fname is not None:
            self.level_to_use = self.pretreat.level_fname
        sct.printv('\nDoing target gray matter segmentation ...', verbose=self.param.verbose, type='normal')
        self.gm_seg = GMsegSupervisedMethod(self.pretreat.treated_target, self.level_to_use, self.model, gm_seg_param=self.param)
        sct.printv('\nDoing result post-treatments ...', verbose=self.param.verbose, type='normal')
        self.post_treatments()
        if self.ref_gm_seg is not None:
            sct.printv('Computing Dice coefficient ...', verbose=self.param.verbose, type='normal')
            self.dice_name = self.validation()

    # ------------------------------------------------------------------------------------------------------------------
    def post_treatments(self):
        """Bring the wm/gm/corrected-wm results back to the original space.

        Un-crops each result, reorients it to RPI, resamples it back to the
        original resolution and copies it next to the target image.
        """
        square_mask = Image(self.pretreat.square_mask)
        tmp_res_names = []
        for res_im in [self.gm_seg.res_wm_seg, self.gm_seg.res_gm_seg, self.gm_seg.corrected_wm_seg]:
            res_im_original_space = inverse_square_crop(res_im, square_mask)
            res_im_original_space.save()
            sct.run('sct_orientation -i ' + res_im_original_space.file_name + '.nii.gz -s RPI')
            res_name = sct.extract_fname(self.target_fname)[1] + res_im.file_name[len(self.pretreat.treated_target[:-7]):] + '.nii.gz'
            # NOTE(review): 'bin' shadows the Python builtin of the same name.
            if self.param.res_type == 'binary':
                bin = True
            else:
                bin = False
            old_res_name = resample_image(res_im_original_space.file_name + '_RPI.nii.gz', npx=self.pretreat.original_px, npy=self.pretreat.original_py, binary=bin)
            if self.param.res_type == 'prob':
                # Remove near-zero probabilities introduced by resampling.
                sct.run('fslmaths ' + old_res_name + ' -thr 0.05 ' + old_res_name)
            sct.run('cp ' + old_res_name + ' ../' + res_name)
            tmp_res_names.append(res_name)
        self.res_names['wm_seg'] = tmp_res_names[0]
        self.res_names['gm_seg'] = tmp_res_names[1]
        self.res_names['corrected_wm_seg'] = tmp_res_names[2]

    def validation(self):
        """Compute the Dice coefficient between results and the reference.

        Binarizes probabilistic results at 0.5 first, derives the reference
        wm segmentation from the reference gm segmentation, and writes both
        Dice outputs to a text file in the parent directory.

        :return: name of the Dice result file
        """
        # NOTE(review): name_ref_gm_seg is assigned but never used.
        name_ref_gm_seg = sct.extract_fname(self.ref_gm_seg)
        im_ref_gm_seg = Image('../' + self.ref_gm_seg)
        res_gm_seg_bin = Image('../' + self.res_names['gm_seg'])
        res_wm_seg_bin = Image('../' + self.res_names['wm_seg'])
        sct.run('cp ../' + self.ref_gm_seg + ' ./ref_gm_seg.nii.gz')
        im_ref_wm_seg = inverse_gmseg_to_wmseg(im_ref_gm_seg, Image('../' + self.sc_seg_fname), 'ref_gm_seg')
        im_ref_wm_seg.file_name = 'ref_wm_seg'
        im_ref_wm_seg.ext = '.nii.gz'
        im_ref_wm_seg.save()
        if self.param.res_type == 'prob':
            res_gm_seg_bin.data = np.asarray((res_gm_seg_bin.data >= 0.5).astype(int))
            res_wm_seg_bin.data = np.asarray((res_wm_seg_bin.data >= 0.50001).astype(int))
        res_gm_seg_bin.path = './'
        res_gm_seg_bin.file_name = 'res_gm_seg_bin'
        res_gm_seg_bin.ext = '.nii.gz'
        res_gm_seg_bin.save()
        res_wm_seg_bin.path = './'
        res_wm_seg_bin.file_name = 'res_wm_seg_bin'
        res_wm_seg_bin.ext = '.nii.gz'
        res_wm_seg_bin.save()
        # If the direct Dice computation fails (different spaces), reslice
        # the reference into the result space and retry.
        try:
            status_gm, output_gm = sct.run('sct_dice_coefficient ref_gm_seg.nii.gz res_gm_seg_bin.nii.gz -2d-slices 2', error_exit='warning', raise_exception=True)
        except Exception:
            sct.run('c3d res_gm_seg_bin.nii.gz ref_gm_seg.nii.gz -reslice-identity -o ref_in_res_space_gm.nii.gz ')
            status_gm, output_gm = sct.run('sct_dice_coefficient ref_in_res_space_gm.nii.gz res_gm_seg_bin.nii.gz -2d-slices 2', error_exit='warning')
        try:
            status_wm, output_wm = sct.run('sct_dice_coefficient ref_wm_seg.nii.gz res_wm_seg_bin.nii.gz -2d-slices 2', error_exit='warning', raise_exception=True)
        except Exception:
            sct.run('c3d res_wm_seg_bin.nii.gz ref_wm_seg.nii.gz -reslice-identity -o ref_in_res_space_wm.nii.gz ')
            status_wm, output_wm = sct.run('sct_dice_coefficient ref_in_res_space_wm.nii.gz res_wm_seg_bin.nii.gz -2d-slices 2', error_exit='warning')
        dice_name = 'dice_' + self.param.res_type + '.txt'
        dice_fic = open('../' + dice_name, 'w')
        if self.param.res_type == 'prob':
            dice_fic.write('WARNING : the probabilistic segmentations were binarized with a threshold at 0.5 to compute the dice coefficient \n')
        dice_fic.write('\n--------------------------------------------------------------\nDice coefficient on the Gray Matter segmentation:\n')
        dice_fic.write(output_gm)
        dice_fic.write('\n\n--------------------------------------------------------------\nDice coefficient on the White Matter segmentation:\n')
        dice_fic.write(output_wm)
        dice_fic.close()
        # sct.run(' mv ./' + dice_name + ' ../')
        return dice_name
########################################################################################################################
# ------------------------------------------------------ MAIN ------------------------------------------------------- #
########################################################################################################################
if __name__ == "__main__":
    param = Param()
    input_target_fname = None
    input_sc_seg_fname = None
    input_t2_data = None
    input_level_fname = None
    input_ref_gm_seg = None
    if param.debug:
        print '\n*** WARNING: DEBUG MODE ON ***\n'
        fname_input = param.path_dictionary + "/errsm_34.nii.gz"
        # NOTE(review): this second assignment overwrites the one above --
        # one of the two was probably meant for a different variable (e.g.
        # the segmentation input). Confirm the intended debug inputs.
        fname_input = param.path_dictionary + "/errsm_34_seg_in.nii.gz"
    else:
        param_default = Param()

        # Initialize the parser and declare the command-line interface.
        parser = Parser(__file__)
        parser.usage.set_description('Project all the input image slices on a PCA generated from set of t2star images')
        parser.add_option(name="-i",
                          type_value="file",
                          description="T2star image you want to segment",
                          mandatory=True,
                          example='t2star.nii.gz')
        parser.add_option(name="-s",
                          type_value="file",
                          description="Spinal cord segmentation of the T2star target",
                          mandatory=True,
                          example='sc_seg.nii.gz')
        parser.add_option(name="-dic",
                          type_value="folder",
                          description="Path to the model data",
                          mandatory=True,
                          example='/home/jdoe/gm_seg_model_data/')
        parser.add_option(name="-t2",
                          type_value=[[','], 'file'],
                          description="T2 data associated to the input image : used to register the template on the T2star and get the vertebral levels"
                                      "In this order : t2 image, t2 segmentation, t2 landmarks (see: http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/)",
                          mandatory=False,
                          default_value=None,
                          example='t2.nii.gz,t2_seg.nii.gz,landmarks.nii.gz')
        parser.add_option(name="-l",
                          type_value="str",
                          description="Image containing level labels for the target or str indicating the level (if the target has only one slice)"
                                      "If -l is used, no need to provide t2 data",
                          mandatory=False,
                          example='MNI-Poly-AMU_level_IRP.nii.gz')
        parser.add_option(name="-first-reg",
                          type_value='multiple_choice',
                          description="Apply a Bspline registration using the spinal cord edges target --> model first",
                          mandatory=False,
                          default_value=0,
                          example=['0', '1'])
        parser.add_option(name="-use-levels",
                          type_value='multiple_choice',
                          description="Use the level information for the model or not",
                          mandatory=False,
                          default_value=1,
                          example=['0', '1'])
        parser.add_option(name="-weight",
                          type_value='float',
                          description="weight parameter on the level differences to compute the similarities (beta)",
                          mandatory=False,
                          default_value=1.2,
                          example=2.0)
        parser.add_option(name="-z",
                          type_value='multiple_choice',
                          description="1: Z regularisation, 0: no ",
                          mandatory=False,
                          default_value=0,
                          example=['0', '1'])
        parser.add_option(name="-res-type",
                          type_value='multiple_choice',
                          description="Type of result segmentation : binary or probabilistic",
                          mandatory=False,
                          default_value='binary',
                          example=['binary', 'prob'])
        parser.add_option(name="-ref",
                          type_value="file",
                          description="Reference segmentation of the gray matter",
                          mandatory=False,
                          example='manual_gm_seg.nii.gz')
        parser.add_option(name="-select-k",
                          type_value='multiple_choice',
                          description="Method used to select the k dictionary slices most similar to the target slice: with a threshold (thr) or by taking the first n slices (n)",
                          mandatory=False,
                          default_value='thr',
                          example=['thr', 'n'])
        parser.add_option(name="-v",
                          type_value="int",
                          description="verbose: 0 = nothing, 1 = classic, 2 = expended",
                          mandatory=False,
                          default_value=0,
                          example='1')

        # Copy the parsed arguments into the shared Param object.
        arguments = parser.parse(sys.argv[1:])
        input_target_fname = arguments["-i"]
        input_sc_seg_fname = arguments["-s"]
        param.path_dictionary = arguments["-dic"]
        param.todo_model = 'load'

        if "-t2" in arguments:
            input_t2_data = arguments["-t2"]
        if "-l" in arguments:
            input_level_fname = arguments["-l"]
        if "-first-reg" in arguments:
            param.first_reg = bool(int(arguments["-first-reg"]))
        if "-use-levels" in arguments:
            param.use_levels = bool(int(arguments["-use-levels"]))
        if "-weight" in arguments:
            param.weight_beta = arguments["-weight"]
        if "-res-type" in arguments:
            param.res_type = arguments["-res-type"]
        if "-z" in arguments:
            param.z_regularisation = bool(int(arguments["-z"]))
        if "-ref" in arguments:
            input_ref_gm_seg = arguments["-ref"]
        if "-select-k" in arguments:
            param.select_k = arguments["-select-k"]
        if "-v" in arguments:
            param.verbose = arguments["-v"]

    # Run the whole pipeline; results are written next to the target image.
    gmsegfull = FullGmSegmentation(input_target_fname, input_sc_seg_fname, input_t2_data, input_level_fname, ref_gm_seg=input_ref_gm_seg, param=param)
| |
'''
Individual stages of the pipeline implemented as functions from
input files to output files.
The run_stage function knows everything about submitting jobs and, given
the state parameter, has full access to the state of the pipeline, such
as config, options, DRMAA and the logger.
'''
from utils import safe_make_dir
from runner import run_stage
import os
# PICARD_JAR = '$PICARD_HOME/lib/picard-1.69.jar'
# PICARD_JAR = '/vlsci/VR0002/kmahmood/Programs/Picard/picard-tools-2.8.3/picard.jar'
# Absolute locations of the Java tools on the cluster. NOTE(review): these
# site-specific paths would be better placed in the pipeline configuration.
PICARD_JAR = '/usr/local/easybuild/software/picard/2.3.0/picard.jar'
SNPEFF_JAR = '/usr/local/easybuild/software/snpEff/4.1d-Java-1.7.0_80/snpEff.jar'
GATK_JAR = '$GATK_HOME/GenomeAnalysisTK.jar'
def java_command(jar_path, mem_in_gb, command_args):
    '''Build a string for running a java command'''
    # Leave some headroom between Java's max heap and the requested memory
    # to accommodate non-heap usage such as stack space.
    heap_gb = mem_in_gb - 2
    return 'java -Xmx{mem}g -jar {jar_path} {command_args}'.format(
        jar_path=jar_path, mem=heap_gb, command_args=command_args)
def run_java(state, stage, jar_path, mem, args):
    '''Build the java command line for *jar_path* and submit it as a stage.'''
    run_stage(state, stage, java_command(jar_path, mem, args))
class Stages(object):
    """Builds and submits the shell commands for each pipeline stage.

    All paths and thresholds come from the pipeline configuration held in
    *state*; each public method corresponds to one Ruffus stage.
    """
    def __init__(self, state):
        self.state = state
        self.reference = self.get_options('ref_grch37')
        self.dbsnp_hg19 = self.get_options('dbsnp_hg19')
        self.mills_hg19 = self.get_options('mills_hg19')
        self.one_k_g_snps = self.get_options('one_k_g_snps')
        self.one_k_g_indels = self.get_options('one_k_g_indels')
        self.one_k_g_highconf_snps = self.get_options('one_k_g_highconf_snps')
        self.hapmap = self.get_options('hapmap')
        # self.interval_hg19 = self.get_options('exome_bed_hg19')
        # self.CEU_mergeGvcf = self.get_options('CEU_mergeGvcf')
        self.snpeff_conf = self.get_options('snpeff_conf')
        self.bamclipper = self.get_options('bamclipper')
        self.vep_path = self.get_options('vep_path')
        self.vt_path = self.get_options('vt_path')
        self.coord_file = self.get_options('coord_file')
        self.target_bed = self.get_options('target_bed')
        # self.interval_file = self.get_options('interval_file')
        self.primer_file = self.get_options('primer_file')
        self.primer_bedpe_file = self.get_options('primer_bedpe_file')
        self.proportionthresh = self.get_options('proportionthresh')
        self.absthresh = self.get_options('absthresh')
        self.maxvariants = self.get_options('maxvariants')
        # self.fragment_bed = self.get_options('fragment_bed')
        self.annolua = self.get_options('annolua')
        self.anno = self.get_options('anno')
        self.hrfile = self.get_options('hrfile')
        self.other_vep = self.get_options('other_vep')
        self.snpeff_path = self.get_options('snpeff_path')
        self.gatk_bed = self.get_options('gatk_bed')
        # self.GBR_mergeGvcf = self.get_options('GBR_mergeGvcf')
        # self.FIN_mergeGvcf = self.get_options('FIN_mergeGvcf')

    def run_picard(self, stage, args):
        '''Run a Picard tool with the stage's configured memory.'''
        mem = int(self.state.config.get_stage_options(stage, 'mem'))
        return run_java(self.state, stage, PICARD_JAR, mem, args)

    def run_snpeff(self, stage, args):
        '''Run snpEff with the stage's configured memory.'''
        mem = int(self.state.config.get_stage_options(stage, 'mem'))
        return run_java(self.state, stage, SNPEFF_JAR, mem, args)

    def run_gatk(self, stage, args):
        '''Run a GATK tool with the stage's configured memory.'''
        mem = int(self.state.config.get_stage_options(stage, 'mem'))
        return run_java(self.state, stage, GATK_JAR, mem, args)

    def get_stage_options(self, stage, *options):
        '''Look up per-stage configuration values.'''
        return self.state.config.get_stage_options(stage, *options)

    def get_options(self, *options):
        '''Look up global configuration values.'''
        return self.state.config.get_options(*options)

    def original_fastqs(self, output):
        '''Original fastq files'''
        # print output
        pass

    def align_bwa(self, inputs, bam_out, sample_id, read_id, lane, lib):
    # def align_bwa(self, inputs, bam_out, sample_id):
        '''Align the paired end fastq files to the reference genome using bwa'''
        fastq_read1_in, fastq_read2_in = inputs
        cores = self.get_stage_options('align_bwa', 'cores')
        safe_make_dir('alignments/{sample}_{readid}'.format(sample=sample_id, readid=read_id))
        # NOTE(review): the template hard-codes PU:lib1 and never uses the
        # {lib} placeholder, so the lib= format argument is unused --
        # confirm whether PU was meant to carry the library name.
        read_group = '"@RG\\tID:{readid}\\tSM:{sample}_{readid}\\tPU:lib1\\tLN:{lane}\\tPL:Illumina"' \
            .format(readid=read_id, lib=lib, lane=lane, sample=sample_id)
        command = 'bwa mem -M -t {cores} -R {read_group} {reference} {fastq_read1} {fastq_read2} ' \
                  '| samtools view -b -h -o {bam} -' \
                  .format(cores=cores,
                          read_group=read_group,
                          fastq_read1=fastq_read1_in,
                          fastq_read2=fastq_read2_in,
                          reference=self.reference,
                          bam=bam_out)
        run_stage(self.state, 'align_bwa', command)

    def apply_undr_rover(self, inputs, vcf_output, sample_id, readid):
    # def align_bwa(self, inputs, bam_out, sample_id):
        '''Apply undr_rover to call variants from paired end fastq files'''
        fastq_read1_in, fastq_read2_in = inputs
        cores = self.get_stage_options('apply_undr_rover', 'cores')
        safe_make_dir('variants/undr_rover')
        safe_make_dir('variants/undr_rover/coverdir')
        coverfile = "variants/undr_rover/coverdir/" + sample_id + "_" + readid + ".coverage"
        # read_group = '"@RG\\tID:{readid}\\tSM:{sample}_{readid}\\tPU:lib1\\tLN:{lane}\\tPL:Illumina"' \
        #     .format(readid=read_id, lib=lib, lane=lane, sample=sample_id)
        command = 'undr_rover --primer_coords {coord_file} ' \
                  '--primer_sequences {primer_file} ' \
                  '--reference {reference} ' \
                  '--out {vcf_output} ' \
                  '--coverfile {coverfile} ' \
                  '--proportionthresh {proportionthresh} ' \
                  '--absthresh {absthresh} ' \
                  '--max_variants {maxvariants} ' \
                  '{fastq_read1} {fastq_read2}'.format(
                      coord_file=self.coord_file, primer_file=self.primer_file,
                      reference=self.reference,
                      vcf_output=vcf_output,
                      #coverdir=self.coverdir,
                      proportionthresh=self.proportionthresh,
                      absthresh=self.absthresh,
                      maxvariants=self.maxvariants,
                      coverfile=coverfile,
                      fastq_read1=fastq_read1_in,
                      fastq_read2=fastq_read2_in)
        run_stage(self.state, 'apply_undr_rover', command)

    def clip_bam(self, bam_in, sorted_bam_out):
        '''Clip the BAM file using Bamclipper'''
        bamclipper_args = '{bamclipper} -b {bam_in} -p {primer_bedpe_file} -n 1'.format(
                          bamclipper=self.bamclipper, bam_in=bam_in, primer_bedpe_file=self.primer_bedpe_file)
        run_stage(self.state, 'clip_bam', bamclipper_args)

    def sort_bam_picard(self, bam_in, sorted_bam_out):
        '''Sort the BAM file using Picard'''
        picard_args = 'SortSam INPUT={bam_in} OUTPUT={sorted_bam_out} ' \
                      'VALIDATION_STRINGENCY=LENIENT SORT_ORDER=coordinate ' \
                      'MAX_RECORDS_IN_RAM=5000000 CREATE_INDEX=True'.format(
                          bam_in=bam_in, sorted_bam_out=sorted_bam_out)
        self.run_picard('sort_bam_picard', picard_args)

    def primary_bam(self, bam_in, sbam_out):
        '''Only keep primary alignments in the BAM file using samtools'''
        command = 'samtools view -h -q 1 -f 2 -F 4 -F 8 -F 256 -b ' \
                  '-o {sbam_out} {bam_in}'.format(
                      bam_in=bam_in, sbam_out=sbam_out)
        run_stage(self.state, 'primary_bam', command)

    # index sorted bam file
    def index_sort_bam_picard(self, bam_in, bam_index):
        '''Index sorted bam using samtools'''
        command = 'samtools index {bam_in} {bam_index}'.format(
                          bam_in=bam_in, bam_index=bam_index)
        run_stage(self.state, 'index_sort_bam_picard', command)

    ##########
    def call_haplotypecaller_gatk(self, bam_in, vcf_out):
        '''Call variants using GATK'''
        safe_make_dir('variants/gatk')
        # safe_make_dir('variants}'.format(sample=sample_id))
        gatk_args = "-T HaplotypeCaller -R {reference} --min_base_quality_score 20 " \
                    "--emitRefConfidence GVCF " \
                    "-A AlleleBalance -A AlleleBalanceBySample " \
                    "-A ChromosomeCounts -A ClippingRankSumTest " \
                    "-A Coverage -A DepthPerAlleleBySample " \
                    "-A DepthPerSampleHC -A FisherStrand " \
                    "-A GCContent -A GenotypeSummaries " \
                    "-A HardyWeinberg -A HomopolymerRun " \
                    "-A LikelihoodRankSumTest -A LowMQ " \
                    "-A MappingQualityRankSumTest -A MappingQualityZero " \
                    "-A QualByDepth " \
                    "-A RMSMappingQuality -A ReadPosRankSumTest " \
                    "-A SampleList -A SpanningDeletions " \
                    "-A StrandBiasBySample -A StrandOddsRatio " \
                    "-A TandemRepeatAnnotator -A VariantType " \
                    "--dontUseSoftClippedBases " \
                    "-I {bam} -o {out}".format(reference=self.reference,
                                               bam=bam_in, out=vcf_out)
        self.run_gatk('call_haplotypecaller_gatk', gatk_args)

    def combine_gvcf_gatk(self, vcf_files_in, vcf_out):
        '''Combine G.VCF files for all samples using GATK'''
        g_vcf_files = ' '.join(['--variant ' + vcf for vcf in vcf_files_in])
        gatk_args = "-T CombineGVCFs -R {reference} -L {gatk_bed} " \
                    "--disable_auto_index_creation_and_locking_when_reading_rods " \
                    "{g_vcf_files} -o {vcf_out}".format(reference=self.reference, gatk_bed=self.gatk_bed,
                                                        g_vcf_files=g_vcf_files, vcf_out=vcf_out)
        self.run_gatk('combine_gvcf_gatk', gatk_args)

    def genotype_gvcf_gatk(self, combined_vcf_in, vcf_out):
        '''Genotype G.VCF files using GATK'''
        cores = self.get_stage_options('genotype_gvcf_gatk', 'cores')
        gatk_args = "-T GenotypeGVCFs -R {reference} " \
                    "--disable_auto_index_creation_and_locking_when_reading_rods " \
                    "--dbsnp {dbsnp} -L {gatk_bed} " \
                    "--num_threads {cores} --variant {combined_vcf} --out {vcf_out}" \
                    .format(reference=self.reference, dbsnp=self.dbsnp_hg19, gatk_bed=self.gatk_bed,
                            cores=cores, combined_vcf=combined_vcf_in, vcf_out=vcf_out)
        self.run_gatk('genotype_gvcf_gatk', gatk_args)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
import re
import six
from nibble import decorators, Information, Duration
@decorators.python_2_div_compatible
@decorators.python_2_nonzero_compatible
@six.python_2_unicode_compatible
class Speed(object):
    """
    Represents a quantity of information processed over a period of time.

    Speeds are compared via their normalised information-per-second value,
    so e.g. ``Speed(Information(60, MB), Duration.MINUTE)`` equals
    ``Speed(Information(1, MB), Duration.SECOND)``.
    """

    # Matches a duration with a unit, e.g. '1.5 hours' -> ('1.5', 'hours').
    DURATION_REGEX = re.compile(r'^(\d+\.?\d*)\s*(\w+)')

    def __init__(self, information, duration=Duration.SECOND):
        """
        Initialise a new speed measurement.

        :param information: The information processed.
        :param duration: The time taken to process the information. Defaults to
                         one second.
        :raises ValueError: If the duration is zero, which would represent an
                            infinite speed.
        """
        if duration == Duration.ZERO:
            raise ValueError('Speed cannot be infinite')
        self.information = information
        self.duration = duration

    @classmethod
    def from_quantity_units(cls, quantity, information_unit, duration_unit):
        """
        Initialise a new speed object from a quantity and unit string.

        :param quantity: The number of the unit.
        :param information_unit: The information part of the unit, e.g. 'GiB'.
        :param duration_unit: The duration part of the unit, e.g. 'week'.
        :return: A `Speed` object representing the quantity and unit.
        """
        information = Information.from_quantity_unit(quantity, information_unit)
        duration = Duration.from_quantity_unit(1, duration_unit)
        return Speed(information, duration)

    @property
    def _per_second(self):
        """
        The amount of information processed per second at this speed. This
        normalised value is used for comparing speeds.

        :return: The amount of information processed per second at this speed.
        """
        return self.information / self.duration.total_seconds()

    def for_duration(self, duration):
        """
        Find the quantity of information processed if this speed is maintained
        for a certain amount of time.

        :param duration: The amount of time this speed is maintained for.
        :return: The resulting amount of information processed.
        """
        scale = duration.nanoseconds / self.duration.nanoseconds
        return self.information * scale

    @decorators.operator_same_class
    def __lt__(self, other):
        return self._per_second < other._per_second

    def __le__(self, other):
        return self < other or self == other

    @decorators.operator_same_class
    def __eq__(self, other):
        return self._per_second == other._per_second

    def __ne__(self, other):
        return not other == self

    def __ge__(self, other):
        return self == other or self > other

    @decorators.operator_same_class
    def __gt__(self, other):
        return self._per_second > other._per_second

    @decorators.operator_same_class
    def __add__(self, other):
        # The sum is expressed per second (the default duration).
        return Speed(self._per_second + other._per_second)

    @decorators.operator_same_class
    def __sub__(self, other):
        """
        Subtract one speed from another.

        :raises ArithmeticError: With 'Cannot have a negative speed' when
            ``other`` is faster than ``self``, or with 'Cannot have an
            infinite speed' when the two speeds are equal.
        """
        # BUG FIX: the zero-difference check used to live inside the try
        # block, so its 'Cannot have an infinite speed' ArithmeticError was
        # caught by this method's own handler and re-raised with the
        # 'Cannot have a negative speed' message. The zero check now runs
        # after the try so each condition reports its own message.
        try:
            # Information subtraction raises ArithmeticError on a negative
            # result.
            difference = self._per_second - other._per_second
        except ArithmeticError:
            raise ArithmeticError('Cannot have a negative speed')
        if difference == Information.ZERO:
            # NOTE(review): existing behaviour (raising on a zero difference)
            # is preserved; only the error-message routing was fixed.
            raise ArithmeticError('Cannot have an infinite speed')
        return Speed(difference)

    @decorators.operator_numeric_type
    def __mul__(self, other):
        return Speed(self.information * other, self.duration)

    @decorators.operator_numeric_type
    def __truediv__(self, other):
        if other == 0:
            raise ZeroDivisionError('Cannot divide {0} by zero'.format(self))
        return Speed(self.information / other, self.duration)

    @decorators.operator_numeric_type
    def __floordiv__(self, other):
        if other == 0:
            raise ZeroDivisionError('Cannot divide {0} by zero'.format(self))
        return Speed(self.information // other, self.duration)

    def __repr__(self):
        return '<Speed({0}, {1})>'.format(repr(self.information),
                                          repr(self.duration))

    def __bool__(self):
        # A speed is truthy iff it transfers a non-zero amount per second.
        return self._per_second > Information.ZERO

    @decorators.python_2_format_compatible
    def __format__(self, format_spec):
        # Defaults to <the most appropriate binary bytes unit> per second
        # [number format|][ ][unit symbol or category][/[quantity][ ]time unit]
        lhs, _, time = format_spec.partition('/')
        if time:
            match = self.DURATION_REGEX.match(time)
            if match:
                # quantity provided
                quantity = float(match.group(1))
                if quantity.is_integer():
                    quantity = int(quantity)
                unit = match.group(2)
            else:
                # no quantity
                quantity = 1
                unit = time
            if not lhs:
                # this is a workaround to maintain the separator '/m' should
                # result in a separator not being printed, but '' is passed to
                # Information as the lhs, so it goes to default formatting
                lhs = 'bB'
        else:
            quantity = 1
            unit = 's'
        try:
            nanos = quantity * Duration.unit_nanoseconds(unit)
        except ValueError as e:
            raise TypeError(e)
        information = self.information * nanos / self.duration.nanoseconds
        time_fmt = unit if quantity == 1 else '{0}{1}'.format(quantity, unit)
        return '{0:{1}}/{2}'.format(information, lhs, time_fmt)

    def __str__(self):
        # Delegates to __format__ with an empty spec for default rendering.
        return '{0}'.format(self)
# Zero speed (no information transferred per second).
Speed.ZERO = Speed(Information.ZERO)
# Ethernet line rates
Speed.TEN_MEGABIT = Speed(Information(10, Information.MEGABITS))
Speed.HUNDRED_MEGABIT = Speed.TEN_MEGABIT * 10
Speed.GIGABIT = Speed.HUNDRED_MEGABIT * 10
Speed.TEN_GIGABIT = Speed.GIGABIT * 10
Speed.FORTY_GIGABIT = Speed.TEN_GIGABIT * 4
Speed.HUNDRED_GIGABIT = Speed.TEN_GIGABIT * 10
# E-carrier (European digital transmission hierarchy) levels
Speed.E0 = Speed(Information(64, Information.KILOBITS))
Speed.E1 = Speed(Information(2.048, Information.MEGABITS))
Speed.E2 = Speed(Information(8.448, Information.MEGABITS))
Speed.E3 = Speed(Information(34.368, Information.MEGABITS))
Speed.E4 = Speed(Information(139.264, Information.MEGABITS))
Speed.E5 = Speed(Information(565.148, Information.MEGABITS))
# T-carrier signaling (digital signal) levels; DS0 equals E0 at 64 kbit/s
Speed.DS0 = Speed.E0
Speed.DS1 = Speed(Information(1.544, Information.MEGABITS))
Speed.DS1C = Speed(Information(3.152, Information.MEGABITS))
Speed.DS2 = Speed(Information(6.312, Information.MEGABITS))
Speed.DS3 = Speed(Information(44.736, Information.MEGABITS))
Speed.DS4 = Speed(Information(274.176, Information.MEGABITS))
Speed.DS5 = Speed(Information(400.352, Information.MEGABITS))
# T-carrier lines (aliases for the corresponding DS levels)
Speed.T1 = Speed.DS1
Speed.T1C = Speed.DS1C
Speed.T2 = Speed.DS2
Speed.T3 = Speed.DS3
Speed.T4 = Speed.DS4
Speed.T5 = Speed.DS5
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import multiprocessing
import jsonschema
import mock
from rally.plugins.common.runners import serial
from rally.task import runner
from rally.task import scenario
from tests.unit import fakes
from tests.unit import test
BASE = "rally.task.runner."
class ScenarioRunnerHelpersTestCase(test.TestCase):
    """Unit tests for the module-level helper functions of rally.task.runner."""

    @mock.patch(BASE + "utils.format_exc")
    def test_format_result_on_timeout(self, mock_format_exc):
        # A timed-out run must produce a result dict whose duration is the
        # given timeout and whose error is the formatted exception.
        mock_exc = mock.MagicMock()
        expected = {
            "duration": 100,
            "idle_duration": 0,
            "scenario_output": {"errors": "", "data": {}},
            "atomic_actions": {},
            "error": mock_format_exc.return_value
        }
        self.assertEqual(runner.format_result_on_timeout(mock_exc, 100),
                         expected)
        mock_format_exc.assert_called_once_with(mock_exc)

    @mock.patch(BASE + "context.ContextManager")
    def test_get_scenario_context(self, mock_context_manager):
        # _get_scenario_context must delegate to
        # ContextManager(context_obj).map_for_scenario().
        mock_context_obj = mock.MagicMock()
        mock_map_for_scenario = (
            mock_context_manager.return_value.map_for_scenario)
        self.assertEqual(mock_map_for_scenario.return_value,
                         runner._get_scenario_context(mock_context_obj))
        mock_context_manager.assert_called_once_with(mock_context_obj)
        mock_map_for_scenario.assert_called_once_with()

    def test_run_scenario_once_internal_logic(self):
        context = runner._get_scenario_context(
            fakes.FakeContext({}).context)
        scenario_cls = mock.MagicMock()
        args = (2, scenario_cls, "test", context, {})
        runner._run_scenario_once(args)
        # The scenario class must be instantiated with the context and the
        # method plus bookkeeping calls made; order is not enforced.
        expected_calls = [
            mock.call(context),
            mock.call().test(),
            mock.call().idle_duration(),
            mock.call().idle_duration(),
            mock.call().atomic_actions()
        ]
        scenario_cls.assert_has_calls(expected_calls, any_order=True)

    @mock.patch(BASE + "rutils.Timer", side_effect=fakes.FakeTimer)
    def test_run_scenario_once_without_scenario_output(self, mock_timer):
        # A scenario method returning nothing yields an empty scenario_output.
        args = (1, fakes.FakeScenario, "do_it", mock.MagicMock(), {})
        result = runner._run_scenario_once(args)
        expected_result = {
            "duration": fakes.FakeTimer().duration(),
            "timestamp": fakes.FakeTimer().timestamp(),
            "idle_duration": 0,
            "error": [],
            "scenario_output": {"errors": "", "data": {}},
            "atomic_actions": {}
        }
        self.assertEqual(expected_result, result)

    @mock.patch(BASE + "rutils.Timer", side_effect=fakes.FakeTimer)
    def test_run_scenario_once_with_scenario_output(self, mock_timer):
        # A scenario method returning data must have it propagated verbatim.
        args = (1, fakes.FakeScenario, "with_output", mock.MagicMock(), {})
        result = runner._run_scenario_once(args)
        expected_result = {
            "duration": fakes.FakeTimer().duration(),
            "timestamp": fakes.FakeTimer().timestamp(),
            "idle_duration": 0,
            "error": [],
            "scenario_output": fakes.FakeScenario().with_output(),
            "atomic_actions": {}
        }
        self.assertEqual(expected_result, result)

    @mock.patch(BASE + "rutils.Timer", side_effect=fakes.FakeTimer)
    def test_run_scenario_once_exception(self, mock_timer):
        # A raising scenario must capture the exception type and message in
        # the result's "error" field instead of propagating it.
        args = (1, fakes.FakeScenario, "something_went_wrong",
                mock.MagicMock(), {})
        result = runner._run_scenario_once(args)
        expected_error = result.pop("error")
        expected_result = {
            "duration": fakes.FakeTimer().duration(),
            "timestamp": fakes.FakeTimer().timestamp(),
            "idle_duration": 0,
            "scenario_output": {"errors": "", "data": {}},
            "atomic_actions": {}
        }
        self.assertEqual(expected_result, result)
        self.assertEqual(expected_error[:2],
                         ["Exception", "Something went wrong"])
class ScenarioRunnerResultTestCase(test.TestCase):
    """Tests for the jsonschema validation performed by ScenarioRunnerResult."""

    def test_validate(self):
        # Two fully-populated result dicts that must pass schema validation
        # unchanged (ScenarioRunnerResult behaves like the validated dict).
        config = [
            {
                "duration": 1.0,
                "idle_duration": 1.0,
                "scenario_output": {
                    "data": {"test": 1.0},
                    "errors": "test error string 1"
                },
                "atomic_actions": {"test1": 1.0},
                "error": []
            },
            {
                "duration": 2.0,
                "idle_duration": 2.0,
                "scenario_output": {
                    "data": {"test": 2.0},
                    "errors": "test error string 2"
                },
                "atomic_actions": {"test2": 2.0},
                "error": ["a", "b", "c"]
            }
        ]
        self.assertEqual(config[0], runner.ScenarioRunnerResult(config[0]))
        self.assertEqual(config[1], runner.ScenarioRunnerResult(config[1]))

    def test_validate_failed(self):
        # A dict missing the required keys must be rejected by the schema.
        config = {"a": 10}
        self.assertRaises(jsonschema.ValidationError,
                          runner.ScenarioRunnerResult, config)
class ScenarioRunnerTestCase(test.TestCase):
    """Tests for the base ScenarioRunner behaviour, exercised through the
    SerialScenarioRunner concrete subclass."""

    @mock.patch(BASE + "rutils.Timer.duration", return_value=10)
    def test_run(self, mock_timer_duration):
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        runner_obj._run_scenario = mock.MagicMock()
        scenario_name = "NovaServers.boot_server_from_volume_and_delete"
        config_kwargs = {"image": {"id": 1}, "flavor": {"id": 1}}
        context_obj = {
            "task": runner_obj.task,
            "scenario_name": scenario_name,
            "admin": {"endpoint": mock.MagicMock()},
            "config": {
                "cleanup": ["nova", "cinder"], "some_ctx": 2, "users": {}
            }
        }
        result = runner_obj.run(scenario_name, context_obj, config_kwargs)
        # run() returns the total duration measured by the (mocked) timer and
        # leaves no unconsumed results in the queue.
        self.assertEqual(result, mock_timer_duration.return_value)
        self.assertEqual(list(runner_obj.result_queue), [])
        cls_name, method_name = scenario_name.split(".", 1)
        cls = scenario.Scenario.get_by_name(cls_name)
        # Resource dicts ({"id": 1}) must be flattened to their ids before
        # being handed to _run_scenario.
        expected_config_kwargs = {"image": 1, "flavor": 1}
        runner_obj._run_scenario.assert_called_once_with(
            cls, method_name, context_obj, expected_config_kwargs)

    def test_runner_send_result_exception(self):
        # _send_result validates its payload; an arbitrary mock must fail.
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        self.assertRaises(
            jsonschema.ValidationError,
            lambda: runner_obj._send_result(mock.MagicMock()))

    def test_abort(self):
        # abort() must set the shared "aborted" event exactly once.
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        self.assertFalse(runner_obj.aborted.is_set())
        runner_obj.abort()
        self.assertTrue(runner_obj.aborted.is_set())

    def test__create_process_pool(self):
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        processes_to_start = 10

        def worker_process(i):
            pass

        counter = ((i,) for i in range(100))
        # Only processes_to_start processes are created even though the
        # argument generator could supply 100 argument tuples.
        process_pool = runner_obj._create_process_pool(
            processes_to_start, worker_process, counter)
        self.assertEqual(processes_to_start, len(process_pool))
        for process in process_pool:
            self.assertIsInstance(process, multiprocessing.Process)

    @mock.patch(BASE + "ScenarioRunner._send_result")
    def test__join_processes(self, mock_scenario_runner__send_result):
        # Every pooled process must be joined and the result queue closed.
        process = mock.MagicMock(is_alive=mock.MagicMock(return_value=False))
        processes = 10
        process_pool = collections.deque([process] * processes)
        mock_result_queue = mock.MagicMock(
            empty=mock.MagicMock(return_value=True))
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        runner_obj._join_processes(process_pool, mock_result_queue)
        self.assertEqual(processes, process.join.call_count)
        mock_result_queue.close.assert_called_once_with()
| |
import json
from datetime import datetime
from sqlalchemy import desc
from flask_babel import lazy_gettext
from c3bottles import app, db
from c3bottles.model.category import Category, all_categories
from c3bottles.model.location import Location
from c3bottles.model.report import Report
from c3bottles.model.visit import Visit
class DropPoint(db.Model):
    """
    A location in the venue for visitors to drop their empty bottles.

    A drop point consists of a sign "bottle drop point <number>" at the
    wall which tells visitors that a drop point should be present there
    and a number of empty crates to drop bottles into.

    If the `removed` column is not null, the drop point has been removed
    from the venue (numbers are not reassigned).

    Each drop point is referenced by a unique number, which is
    consequently the primary key to identify a specific drop point. Since
    the location of drop points may change over time, it is not simply
    saved in the table of drop points but rather a class itself.
    """

    # Manually assigned unique drop point number; never auto-incremented
    # and never reused after removal.
    number = db.Column(db.Integer, primary_key=True, autoincrement=False)
    # NOTE(review): the column default is 1 while __init__ defaults to 0;
    # 0 is only accepted if present in all_categories — confirm intended.
    category_id = db.Column(db.Integer, nullable=False, default=1)
    # Creation time of the drop point.
    time = db.Column(db.DateTime)
    # Removal time; None while the drop point is still active.
    removed = db.Column(db.DateTime)
    # Ordered by time, so locations[-1] is always the current location.
    locations = db.relationship("Location", order_by="Location.time")
    reports = db.relationship("Report", lazy="dynamic")
    visits = db.relationship("Visit", lazy="dynamic")

    def __init__(
            self,
            number,
            category_id=0,
            description=None,
            lat=None,
            lng=None,
            level=None,
            time=None,
    ):
        """
        Create a new drop point object.

        New drop point objects will be added to the database automatically
        if the creation was successful. A location will also be added
        automatically.

        :raises ValueError: If an error occurred during creation of the drop
            point. The error message will contain a tuple of dicts which
            indicate in which part of the creation the error occurred.
        """
        # Errors from all validation steps are collected and raised together
        # so the caller sees every problem at once.
        errors = []
        try:
            self.number = int(number)
        except (TypeError, ValueError):
            errors.append({"number": lazy_gettext("Drop point number is not a number.")})
        else:
            if self.number < 1:
                errors.append({"number": lazy_gettext("Drop point number is not positive.")})
            # no_autoflush prevents the half-initialised object from being
            # flushed by the uniqueness query below.
            with db.session.no_autoflush:
                if DropPoint.query.get(self.number):
                    errors.append({"number": lazy_gettext("That drop point already exists.")})
        if category_id in all_categories:
            self.category_id = category_id
        else:
            errors.append({"cat_id": lazy_gettext("Invalid drop point category.")})
        if time and not isinstance(time, datetime):
            errors.append({"DropPoint": lazy_gettext("Creation time not a datetime object.")})
        if isinstance(time, datetime) and time > datetime.today():
            errors.append({"DropPoint": lazy_gettext("Creation time in the future.")})
        self.time = time if time else datetime.today()
        try:
            # The initial Location registers itself with this drop point.
            Location(
                self,
                time=self.time,
                description=description,
                lat=lat,
                lng=lng,
                level=level
            )
        except ValueError as e:
            errors += e.args
        if errors:
            raise ValueError(*errors)
        db.session.add(self)

    def remove(self, time=None):
        """
        Remove a drop point.

        This will not actually purge a drop point from the database but
        simply mark it as removed so it will no longer show up in the
        frontend. The time of removal can be given optionally and will
        default to :func:`datetime.today()`.
        """
        if self.removed:
            raise RuntimeError({"DropPoint": lazy_gettext("Drop point already removed.")})
        if time and not isinstance(time, datetime):
            raise TypeError({"DropPoint": lazy_gettext("Removal time not a datetime object.")})
        if time and time > datetime.today():
            raise ValueError({"DropPoint": lazy_gettext("Removal time in the future.")})
        self.removed = time if time else datetime.today()

    def report(self, state=None, time=None):
        """
        Submit a report for a drop point.
        """
        # The Report constructor registers itself with this drop point.
        Report(self, time=time, state=state)

    def visit(self, action=None, time=None):
        """
        Perform a visit of a drop point.
        """
        # The Visit constructor registers itself with this drop point.
        Visit(self, time=time, action=action)

    @property
    def category(self):
        # Resolve the category id to its Category object.
        return Category.get(self.category_id)

    @property
    def level(self):
        # Current level, taken from the newest location (None if no location).
        return self.locations[-1].level if self.locations else None

    @property
    def lat(self):
        # Current latitude, taken from the newest location.
        return self.locations[-1].lat if self.locations else None

    @property
    def lng(self):
        # Current longitude, taken from the newest location.
        return self.locations[-1].lng if self.locations else None

    @property
    def description(self):
        # Current textual description, taken from the newest location.
        return self.locations[-1].description if self.locations else None

    @property
    def description_with_level(self):
        # Only mention the level if the map actually has multiple levels.
        map_source = app.config.get("MAP_SOURCE", {})
        if len(map_source.get("level_config", [])) > 1:
            return lazy_gettext(
                "%(location)s on level %(level)i",
                location=self.description if self.description else lazy_gettext("somewhere"),
                level=self.level
            )
        else:
            return self.description if self.description else lazy_gettext("somewhere")

    @property
    def location(self):
        # The current (newest) Location object, or None.
        return self.locations[-1] if self.locations else None

    @property
    def total_report_count(self):
        return self.reports.count()

    @property
    def new_report_count(self):
        # Number of reports filed since the last visit; all reports if the
        # drop point has never been visited.
        last_visit = self.last_visit
        if last_visit:
            return self.reports \
                .filter(Report.time > last_visit.time) \
                .count()
        else:
            return self.total_report_count

    @property
    def last_state(self):
        """
        Get the current state of a drop point.

        The state is influenced by two mechanisms:

        * Reports: a report will always set the drop point to the state
          that has been reported by the reporter, irrespective of the
          state before.
        * Visits: If a visit was performed since the last report, the
          drop point is now either empty or unchanged and therefore,
          if the drop point was emptied, the empty state is returned.
          If the drop point was not emptied during the visit, the last
          reported state will be returned.

        If neither reports nor visits have been recorded yet or only visits
        without any actions, the drop point state is returned as new.
        """
        last_report = self.last_report
        last_visit = self.last_visit
        if last_report is not None and last_visit is not None:
            if last_visit.time > last_report.time:
                # Check all visits since the last report for an emptying
                # action (Visit.actions[0]).
                visits = self.visits \
                    .filter(Visit.time > last_report.time) \
                    .order_by(Visit.time.desc()) \
                    .all()
                for visit in visits:
                    if visit.action == Visit.actions[0]:
                        return Report.states[-1]
            return last_report.state
        if last_report is not None:
            return last_report.state
        if last_visit is not None:
            if last_visit.action == Visit.actions[0]:
                return Report.states[-1]
        # No reports and no (relevant) visits: the drop point counts as new.
        return Report.states[1]

    @property
    def last_report(self):
        """
        Get the last report of a drop point.
        """
        return self.reports.order_by(Report.time.desc()).first()

    @property
    def last_visit(self):
        """
        Get the last visit of a drop point.
        """
        return self.visits.order_by(Visit.time.desc()).first()

    @property
    def new_reports(self):
        """
        Get the reports since the last visit of a drop point.

        This method returns all reports for this drop point since the last
        visit ordered descending by time, i.e. the newest report is the first
        in the list. If no visits have been recorded yet, all reports are
        returned.
        """
        last_visit = self.last_visit
        if last_visit:
            return self.reports \
                .filter(Report.time > last_visit.time) \
                .order_by(Report.time.desc()) \
                .all()
        else:
            return self.reports.order_by(Report.time.desc()).all()

    @property
    def history(self):
        # Merge visits, reports, location changes, creation and removal into
        # a single timeline, newest entry first.
        history = []
        for visit in self.visits.all():
            history.append({"time": visit.time, "visit": visit})
        for report in self.reports.all():
            history.append({"time": report.time, "report": report})
        for location in self.locations:
            history.append({"time": location.time, "location": location})
        history.append({"time": self.time, "drop_point": self})
        if self.removed:
            history.append({"time": self.removed, "removed": True})
        return sorted(history, key=lambda k: k["time"], reverse=True)

    @property
    def visit_interval(self):
        """
        Get the visit interval for this drop point.

        This method returns the visit interval for this drop point
        in seconds.

        This is not implemented as a static method or a constant
        since in the future the visit interval might depend on
        the location of drop points, time of day or a combination
        of those.
        """
        # BASE_VISIT_INTERVAL is configured in minutes; convert to seconds.
        return 60 * app.config.get("BASE_VISIT_INTERVAL", 120)

    @property
    def priority_factor(self):
        """
        Get the priority factor.

        This factor determines how often a drop point should be visited.
        The factor depends on the severity of the reports submitted.
        """
        # The priority of a removed drop point obviously is always 0.
        if self.removed:
            return 0
        new_reports = self.new_reports
        # This is the starting priority. The report weight should
        # be scaled relative to 1, so this can be interpreted as a
        # number of standing default reports ensuring that every
        # drop point's priority increases slowly if it is not
        # visited even if no real reports come in.
        priority = app.config.get("DEFAULT_VISIT_PRIORITY", 1)
        # Newer reports count fully; each older report's weight is halved
        # again (geometric decay), since new_reports is newest-first.
        i = 0
        for report in new_reports:
            priority += report.get_weight() / 2**i
            i += 1
        priority /= (1.0 * self.visit_interval)
        return priority

    @property
    def priority_base_time(self):
        """
        Get the base time for visit priority calculation of a drop point.

        This is either the time of the last visit, or, if no visit has been
        performed yet, the creation time of the drop point.
        """
        if self.last_visit:
            return self.last_visit.time
        else:
            return self.time

    @property
    def priority(self):
        """
        Get the priority to visit this drop point.

        The priority to visit a drop point mainly depends on the
        number and weight of reports since the last visit.

        In addition, priority increases with time since the last
        visit even if the states of reports indicate a low priority.
        This ensures that every drop point is visited from time to
        time.
        """
        priority = self.priority_factor * \
            (datetime.today() - self.priority_base_time).total_seconds()
        return round(priority, 2)

    @classmethod
    def get_dp_info(cls, number):
        # Build a JSON-serialisable summary dict for one drop point, or
        # return None if the number is unknown.
        dp = cls.query.get(number)
        if dp is not None:
            return {
                "number": dp.number,
                "category_id": dp.category_id,
                "category": str(dp.category),
                "description": dp.description,
                "description_with_level": str(dp.description_with_level),
                "reports_total": dp.total_report_count,
                "reports_new": dp.new_report_count,
                "priority": dp.priority,
                "priority_factor": dp.priority_factor,
                # NOTE(review): strftime("%s") (epoch seconds) is a glibc
                # extension and not portable — confirm target platforms.
                "base_time": dp.priority_base_time.strftime("%s"),
                "last_state": dp.last_state,
                "removed": True if dp.removed else False,
                "lat": dp.lat,
                "lng": dp.lng,
                "level": dp.level
            }
        else:
            return None

    @classmethod
    def get_dp_json(cls, number):
        """
        Get a JSON string characterizing a drop point.

        This returns a JSON representation of the dict constructed by
        :meth:`get_dp_info()`.
        """
        return json.dumps(
            {number: cls.get_dp_info(number)},
            indent=4 if app.debug else None
        )

    @staticmethod
    def get_dps_json(time=None):
        """
        Get drop points as a JSON string.

        If a time has been given as optional parameters, only drop points
        are returned that have changes since that time stamp, i.e. have
        been created, visited, reported or changed their location.
        """
        if time is None:
            dps = DropPoint.query.all()
        else:
            # A set de-duplicates drop points touched by several kinds of
            # change (creation, relocation, visit, report) since `time`.
            dp_set = set()
            dp_set.update(
                [dp for dp in DropPoint.query.filter(DropPoint.time > time).all()],
                [l.dp for l in Location.query.filter(Location.time > time).all()],
                [v.dp for v in Visit.query.filter(Visit.time > time).all()],
                [r.dp for r in Report.query.filter(Report.time > time).all()]
            )
            dps = list(dp_set)
        ret = {}
        for dp in dps:
            ret[dp.number] = DropPoint.get_dp_info(dp.number)
        return json.dumps(ret, indent=4 if app.debug else None)

    @staticmethod
    def get_next_free_number():
        """
        Get the next free drop point number.
        """
        # Numbers are never reused, so one above the highest existing
        # number is always free.
        last = DropPoint.query \
            .order_by(desc(DropPoint.number)) \
            .limit(1).first()
        if last:
            return last.number + 1
        else:
            return 1

    def __repr__(self):
        return "Drop point %s (%s)" % (
            self.number,
            "inactive" if self.removed else "active"
        )
| |
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# DNS record types handled by this module; NOTE(review): NS and SOA are
# listed in add_change's docstring but absent here — confirm intended.
RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
    """
    A list of resource records.

    :ivar hosted_zone_id: The ID of the hosted zone.
    :ivar comment: A comment that will be stored with the change.
    :ivar changes: A list of changes.
    """

    ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
    <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2013-04-01/">
            <ChangeBatch>
                <Comment>%(comment)s</Comment>
                <Changes>%(changes)s</Changes>
            </ChangeBatch>
        </ChangeResourceRecordSetsRequest>"""

    ChangeXML = """<Change>
        <Action>%(action)s</Action>
        %(record)s
    </Change>"""

    def __init__(self, connection=None, hosted_zone_id=None, comment=None):
        """
        :param connection: A Route53Connection used to commit the change set.
        :param hosted_zone_id: The ID of the hosted zone to change.
        :param comment: Optional comment stored with the change batch.
        """
        self.connection = connection
        self.hosted_zone_id = hosted_zone_id
        self.comment = comment
        self.changes = []
        # Pagination cursors filled in by endElement() when a listing
        # response is truncated.
        self.next_record_name = None
        self.next_record_type = None
        super(ResourceRecordSets, self).__init__([('ResourceRecordSet', Record)])

    def __repr__(self):
        if self.changes:
            record_list = ','.join([c.__repr__() for c in self.changes])
        else:
            record_list = ','.join([record.__repr__() for record in self])
        # BUG FIX: the format string previously lacked the closing '>'.
        return '<ResourceRecordSets:%s [%s]>' % (self.hosted_zone_id,
                                                 record_list)

    def add_change(self, action, name, type, ttl=600,
                   alias_hosted_zone_id=None, alias_dns_name=None,
                   identifier=None, weight=None, region=None,
                   alias_evaluate_target_health=None, health_check=None):
        """
        Add a change request to the set.

        :type action: str
        :param action: The action to perform ('CREATE'|'DELETE'|'UPSERT')

        :type name: str
        :param name: The name of the domain you want to perform the action on.

        :type type: str
        :param type: The DNS record type.  Valid values are:

            * A
            * AAAA
            * CNAME
            * MX
            * NS
            * PTR
            * SOA
            * SPF
            * SRV
            * TXT

        :type ttl: int
        :param ttl: The resource record cache time to live (TTL), in seconds.

        :type alias_hosted_zone_id: str
        :param alias_hosted_zone_id: *Alias resource record sets only* The
            value of the hosted zone ID, CanonicalHostedZoneNameId, for
            the LoadBalancer.

        :type alias_dns_name: str
        :param alias_dns_name: *Alias resource record sets only*
            Information about the domain to which you are redirecting traffic.

        :type identifier: str
        :param identifier: *Weighted and latency-based resource record sets
            only* An identifier that differentiates among multiple resource
            record sets that have the same combination of DNS name and type.

        :type weight: int
        :param weight: *Weighted resource record sets only* Among resource
            record sets that have the same combination of DNS name and type,
            a value that determines what portion of traffic for the current
            resource record set is routed to the associated location

        :type region: str
        :param region: *Latency-based resource record sets only* Among resource
            record sets that have the same combination of DNS name and type,
            a value that determines which region this should be associated with
            for the latency-based routing

        :type alias_evaluate_target_health: Boolean
        :param alias_evaluate_target_health: *Required for alias resource
            record sets* Indicates whether this Resource Record Set should
            respect the health status of any health checks associated with
            the ALIAS target record which it is linked to.

        :type health_check: str
        :param health_check: Health check to associate with this record
        """
        change = Record(name, type, ttl,
                        alias_hosted_zone_id=alias_hosted_zone_id,
                        alias_dns_name=alias_dns_name, identifier=identifier,
                        weight=weight, region=region,
                        alias_evaluate_target_health=alias_evaluate_target_health,
                        health_check=health_check)
        self.changes.append([action, change])
        return change

    def add_change_record(self, action, change):
        """Add an existing record to a change set with the specified action"""
        self.changes.append([action, change])
        return

    def to_xml(self):
        """Convert this ResourceRecordSet into XML
        to be saved via the ChangeResourceRecordSetsRequest"""
        changesXML = ""
        for change in self.changes:
            changeParams = {"action": change[0], "record": change[1].to_xml()}
            changesXML += self.ChangeXML % changeParams
        params = {"comment": self.comment, "changes": changesXML}
        return self.ChangeResourceRecordSetsBody % params

    def commit(self):
        """Commit this change"""
        if not self.connection:
            import boto
            self.connection = boto.connect_route53()
        return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())

    def endElement(self, name, value, connection):
        """Overwritten to also add the NextRecordName and
        NextRecordType to the base object"""
        if name == 'NextRecordName':
            self.next_record_name = value
        elif name == 'NextRecordType':
            self.next_record_type = value
        else:
            return super(ResourceRecordSets, self).endElement(name, value, connection)

    def __iter__(self):
        """Override the next function to support paging"""
        results = super(ResourceRecordSets, self).__iter__()
        # Remember the original truncation flag so iterating does not leave
        # the object in a mutated state.
        truncated = self.is_truncated
        while results:
            for obj in results:
                yield obj
            if self.is_truncated:
                # Fetch the next page, continuing from the cursors recorded
                # by endElement().
                self.is_truncated = False
                results = self.connection.get_all_rrsets(
                    self.hosted_zone_id, name=self.next_record_name,
                    type=self.next_record_type)
            else:
                results = None
                self.is_truncated = truncated
class Record(object):
    """An individual ResourceRecordSet.

    Depending on which attributes are populated, serializes either as a
    plain resource record set (TTL + values), an alias target, a weighted
    (WRR) entry, or a latency-based (region) entry.  Values are inserted
    into the XML templates verbatim via %-interpolation — no XML escaping
    is performed here.
    """

    HealthCheckBody = """<HealthCheckId>%s</HealthCheckId>"""

    XMLBody = """<ResourceRecordSet>
        <Name>%(name)s</Name>
        <Type>%(type)s</Type>
        %(weight)s
        %(body)s
        %(health_check)s
    </ResourceRecordSet>"""

    WRRBody = """
        <SetIdentifier>%(identifier)s</SetIdentifier>
        <Weight>%(weight)s</Weight>
    """

    RRRBody = """
        <SetIdentifier>%(identifier)s</SetIdentifier>
        <Region>%(region)s</Region>
    """

    ResourceRecordsBody = """
        <TTL>%(ttl)s</TTL>
        <ResourceRecords>
            %(records)s
        </ResourceRecords>"""

    ResourceRecordBody = """<ResourceRecord>
        <Value>%s</Value>
    </ResourceRecord>"""

    AliasBody = """<AliasTarget>
        <HostedZoneId>%(hosted_zone_id)s</HostedZoneId>
        <DNSName>%(dns_name)s</DNSName>
        %(eval_target_health)s
    </AliasTarget>"""

    EvaluateTargetHealth = """<EvaluateTargetHealth>%s</EvaluateTargetHealth>"""

    def __init__(self, name=None, type=None, ttl=600, resource_records=None,
                 alias_hosted_zone_id=None, alias_dns_name=None, identifier=None,
                 weight=None, region=None, alias_evaluate_target_health=None,
                 health_check=None):
        # name: record set name; type: RR type string (e.g. 'A', 'CNAME').
        self.name = name
        self.type = type
        self.ttl = ttl
        # Fresh list per instance — avoids the shared-mutable-default trap.
        if resource_records is None:
            resource_records = []
        self.resource_records = resource_records
        # When both alias fields are set, to_xml() emits an AliasTarget
        # instead of a TTL + ResourceRecords block.
        self.alias_hosted_zone_id = alias_hosted_zone_id
        self.alias_dns_name = alias_dns_name
        # identifier + weight -> weighted round robin (WRR);
        # identifier + region -> latency-based routing (LBR).
        self.identifier = identifier
        self.weight = weight
        self.region = region
        self.alias_evaluate_target_health = alias_evaluate_target_health
        self.health_check = health_check

    def __repr__(self):
        return '<Record:%s:%s:%s>' % (self.name, self.type, self.to_print())

    def add_value(self, value):
        """Add a resource record value"""
        self.resource_records.append(value)

    def set_alias(self, alias_hosted_zone_id, alias_dns_name,
                  alias_evaluate_target_health=False):
        """Make this an alias resource record set"""
        self.alias_hosted_zone_id = alias_hosted_zone_id
        self.alias_dns_name = alias_dns_name
        self.alias_evaluate_target_health = alias_evaluate_target_health

    def to_xml(self):
        """Spit this resource record set out as XML"""
        if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
            # Use alias: the EvaluateTargetHealth element is optional and
            # only emitted when the attribute was explicitly set.
            if self.alias_evaluate_target_health is not None:
                eval_target_health = self.EvaluateTargetHealth % ('true' if self.alias_evaluate_target_health else 'false')
            else:
                eval_target_health = ""
            body = self.AliasBody % { "hosted_zone_id": self.alias_hosted_zone_id,
                                      "dns_name": self.alias_dns_name,
                                      "eval_target_health": eval_target_health }
        else:
            # Use resource record(s): one <ResourceRecord> per value.
            records = ""
            for r in self.resource_records:
                records += self.ResourceRecordBody % r
            body = self.ResourceRecordsBody % {
                "ttl": self.ttl,
                "records": records,
            }
        # Routing-policy fragment: WRR takes precedence over region-based.
        weight = ""
        if self.identifier is not None and self.weight is not None:
            weight = self.WRRBody % {"identifier": self.identifier, "weight":
                                     self.weight}
        elif self.identifier is not None and self.region is not None:
            weight = self.RRRBody % {"identifier": self.identifier, "region":
                                     self.region}
        health_check = ""
        if self.health_check is not None:
            health_check = self.HealthCheckBody % (self.health_check)
        params = {
            "name": self.name,
            "type": self.type,
            "weight": weight,
            "body": body,
            "health_check": health_check
        }
        return self.XMLBody % params

    def to_print(self):
        """Return a short human-readable summary of this record's data."""
        rr = ""
        if self.alias_hosted_zone_id is not None and self.alias_dns_name is not None:
            # Show alias
            rr = 'ALIAS ' + self.alias_hosted_zone_id + ' ' + self.alias_dns_name
            if self.alias_evaluate_target_health is not None:
                rr += ' (EvalTarget %s)' % self.alias_evaluate_target_health
        else:
            # Show resource record(s)
            rr = ",".join(self.resource_records)
        if self.identifier is not None and self.weight is not None:
            rr += ' (WRR id=%s, w=%s)' % (self.identifier, self.weight)
        elif self.identifier is not None and self.region is not None:
            rr += ' (LBR id=%s, region=%s)' % (self.identifier, self.region)
        return rr

    def endElement(self, name, value, connection):
        # SAX end-element hook: map response XML elements onto attributes.
        if name == 'Name':
            self.name = value
        elif name == 'Type':
            self.type = value
        elif name == 'TTL':
            self.ttl = value
        elif name == 'Value':
            self.resource_records.append(value)
        elif name == 'HostedZoneId':
            self.alias_hosted_zone_id = value
        elif name == 'DNSName':
            self.alias_dns_name = value
        elif name == 'SetIdentifier':
            self.identifier = value
        elif name == 'EvaluateTargetHealth':
            # Service sends 'true'/'false'; normalize to bool.
            self.alias_evaluate_target_health = value.lower() == 'true'
        elif name == 'Weight':
            self.weight = value
        elif name == 'Region':
            self.region = value

    def startElement(self, name, attrs, connection):
        # No nested objects to construct for this resource.
        return None
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""NCF framework to train and evaluate the NeuMF model.
The NeuMF model assembles both MF and MLP models under the NCF framework. Check
`neumf_model.py` for more details about the models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import heapq
import math
import multiprocessing
import os
import signal
import typing
# pylint: disable=g-bad-import-order
import numpy as np
from absl import app as absl_app
from absl import flags
import tensorflow as tf
# pylint: enable=g-bad-import-order
from tensorflow.contrib.compiler import xla
from official.datasets import movielens
from official.recommendation import constants as rconst
from official.recommendation import data_preprocessing
from official.recommendation import model_runner
from official.recommendation import neumf_model
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.logs import mlperf_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
FLAGS = flags.FLAGS
def construct_estimator(num_gpus, model_dir, iterations, params, batch_size,
                        eval_batch_size):
    """Construct either an Estimator or TPUEstimator for NCF.

    Args:
      num_gpus: The number of gpus (Used to select distribution strategy)
      model_dir: The model directory for the estimator
      iterations: Estimator iterations
      params: The params dict for the estimator
      batch_size: The mini-batch size for training.
      eval_batch_size: The batch size used during evaluation.

    Returns:
      A (train_estimator, eval_estimator) pair.  On TPU these are two
      distinct TPUEstimators; on CPU/GPU the same Estimator object is
      returned twice.
    """
    if params["use_tpu"]:
        tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
            tpu=params["tpu"],
            zone=params["tpu_zone"],
            project=params["tpu_gcp_project"],
        )
        # Leftover state from a previous run could pollute this one.
        tf.logging.info("Issuing reset command to TPU to ensure a clean state.")
        tf.Session.reset(tpu_cluster_resolver.get_master())
        tpu_config = tf.contrib.tpu.TPUConfig(
            iterations_per_loop=iterations,
            num_shards=8)
        run_config = tf.contrib.tpu.RunConfig(
            cluster=tpu_cluster_resolver,
            model_dir=model_dir,
            save_checkpoints_secs=600,
            session_config=tf.ConfigProto(
                allow_soft_placement=True, log_device_placement=False),
            tpu_config=tpu_config)
        # TPUEstimator manages batch size itself; it rejects a params dict
        # that already contains a "batch_size" key.
        tpu_params = {k: v for k, v in params.items() if k != "batch_size"}
        train_estimator = tf.contrib.tpu.TPUEstimator(
            model_fn=neumf_model.neumf_model_fn,
            use_tpu=True,
            train_batch_size=batch_size,
            eval_batch_size=eval_batch_size,
            params=tpu_params,
            config=run_config)
        # Separate eval estimator; train_batch_size=1 is a dummy value
        # since this instance is only used for evaluation.
        eval_estimator = tf.contrib.tpu.TPUEstimator(
            model_fn=neumf_model.neumf_model_fn,
            use_tpu=True,
            train_batch_size=1,
            eval_batch_size=eval_batch_size,
            params=tpu_params,
            config=run_config)
        return train_estimator, eval_estimator

    # CPU/GPU path: one Estimator serves both training and evaluation.
    distribution = distribution_utils.get_distribution_strategy(num_gpus=num_gpus)
    run_config = tf.estimator.RunConfig(train_distribute=distribution,
                                        eval_distribute=distribution)
    params["eval_batch_size"] = eval_batch_size
    model_fn = neumf_model.neumf_model_fn
    if params["use_xla_for_gpu"]:
        # Wrap the model_fn so it is compiled with XLA.
        tf.logging.info("Using XLA for GPU for training and evaluation.")
        model_fn = xla.estimator_model_fn(model_fn)
    estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir,
                                       config=run_config, params=params)
    return estimator, estimator
def main(_):
    """absl entry point: run NCF inside benchmark/MLPerf logging contexts."""
    with logger.benchmark_context(FLAGS), \
            mlperf_helper.LOGGER(FLAGS.output_ml_perf_compliance_logging):
        # The MLPerf helper needs the NCF source root to tag its logs.
        mlperf_helper.set_ncf_root(os.path.split(os.path.abspath(__file__))[0])
        run_ncf(FLAGS)
        mlperf_helper.stitch_ncf()
def run_ncf(_):
    """Run NCF training and eval loop."""
    if FLAGS.download_if_missing and not FLAGS.use_synthetic_data:
        movielens.download(FLAGS.dataset, FLAGS.data_dir)

    if FLAGS.seed is not None:
        np.random.seed(FLAGS.seed)

    num_gpus = flags_core.get_num_gpus(FLAGS)
    batch_size = distribution_utils.per_device_batch_size(
        int(FLAGS.batch_size), num_gpus)
    total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals

    # Each eval "group" is one positive example plus NUM_EVAL_NEGATIVES
    # negatives, so the eval batch must hold a whole number of groups.
    eval_per_user = rconst.NUM_EVAL_NEGATIVES + 1
    eval_batch_size = int(FLAGS.eval_batch_size or
                          max([FLAGS.batch_size, eval_per_user]))
    if eval_batch_size % eval_per_user:
        # Round down to the nearest multiple of the group size.
        eval_batch_size = eval_batch_size // eval_per_user * eval_per_user
        tf.logging.warning(
            "eval examples per user does not evenly divide eval_batch_size. "
            "Overriding to {}".format(eval_batch_size))

    if FLAGS.use_synthetic_data:
        # Synthetic mode: no real pipeline, fixed step counts, no cleanup.
        ncf_dataset = None
        cleanup_fn = lambda: None
        num_users, num_items = data_preprocessing.DATASET_TO_NUM_USERS_AND_ITEMS[
            FLAGS.dataset]
        num_train_steps = data_preprocessing.SYNTHETIC_BATCHES_PER_EPOCH
        num_eval_steps = data_preprocessing.SYNTHETIC_BATCHES_PER_EPOCH
    else:
        # Real data: spins up the (possibly subprocess-based) async
        # record-generation pipeline; cleanup_fn tears it down.
        ncf_dataset, cleanup_fn = data_preprocessing.instantiate_pipeline(
            dataset=FLAGS.dataset, data_dir=FLAGS.data_dir,
            batch_size=batch_size,
            eval_batch_size=eval_batch_size,
            num_neg=FLAGS.num_neg,
            epochs_per_cycle=FLAGS.epochs_between_evals,
            num_cycles=total_training_cycle,
            match_mlperf=FLAGS.ml_perf,
            deterministic=FLAGS.seed is not None,
            use_subprocess=FLAGS.use_subprocess,
            cache_id=FLAGS.cache_id)
        num_users = ncf_dataset.num_users
        num_items = ncf_dataset.num_items
        # Steps per cycle: positives plus generated negatives, split into
        # mini-batches.
        num_train_steps = int(np.ceil(
            FLAGS.epochs_between_evals * ncf_dataset.num_train_positives *
            (1 + FLAGS.num_neg) / FLAGS.batch_size))
        num_eval_steps = int(np.ceil((1 + rconst.NUM_EVAL_NEGATIVES) *
                                     ncf_dataset.num_users / eval_batch_size))

    model_helpers.apply_clean(flags.FLAGS)

    params = {
        "use_seed": FLAGS.seed is not None,
        "hash_pipeline": FLAGS.hash_pipeline,
        "batch_size": batch_size,
        "eval_batch_size": eval_batch_size,
        "learning_rate": FLAGS.learning_rate,
        "num_users": num_users,
        "num_items": num_items,
        "mf_dim": FLAGS.num_factors,
        "model_layers": [int(layer) for layer in FLAGS.layers],
        "mf_regularization": FLAGS.mf_regularization,
        "mlp_reg_layers": [float(reg) for reg in FLAGS.mlp_regularization],
        "num_neg": FLAGS.num_neg,
        "use_tpu": FLAGS.tpu is not None,
        "tpu": FLAGS.tpu,
        "tpu_zone": FLAGS.tpu_zone,
        "tpu_gcp_project": FLAGS.tpu_gcp_project,
        "beta1": FLAGS.beta1,
        "beta2": FLAGS.beta2,
        "epsilon": FLAGS.epsilon,
        "match_mlperf": FLAGS.ml_perf,
        "use_xla_for_gpu": FLAGS.use_xla_for_gpu,
        "use_estimator": FLAGS.use_estimator,
    }
    if FLAGS.use_estimator:
        train_estimator, eval_estimator = construct_estimator(
            num_gpus=num_gpus, model_dir=FLAGS.model_dir,
            iterations=num_train_steps, params=params,
            batch_size=flags.FLAGS.batch_size, eval_batch_size=eval_batch_size)
    else:
        runner = model_runner.NcfModelRunner(ncf_dataset, params, num_train_steps,
                                             num_eval_steps, FLAGS.use_while_loop)

    # Create hooks that log information about the training and metric values
    train_hooks = hooks_helper.get_train_hooks(
        FLAGS.hooks,
        model_dir=FLAGS.model_dir,
        batch_size=FLAGS.batch_size,  # for ExamplesPerSecondHook
        tensors_to_log={"cross_entropy": "cross_entropy"}
    )
    run_params = {
        "batch_size": FLAGS.batch_size,
        "eval_batch_size": eval_batch_size,
        "number_factors": FLAGS.num_factors,
        "hr_threshold": FLAGS.hr_threshold,
        "train_epochs": FLAGS.train_epochs,
    }
    benchmark_logger = logger.get_benchmark_logger()
    benchmark_logger.log_run_info(
        model_name="recommendation",
        dataset_name=FLAGS.dataset,
        run_params=run_params,
        test_id=FLAGS.benchmark_test_id)

    eval_input_fn = None
    target_reached = False
    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_LOOP)
    for cycle_index in range(total_training_cycle):
        # MLPerf compliance requires evaluation after every epoch.
        assert FLAGS.epochs_between_evals == 1 or not mlperf_helper.LOGGER.enabled
        tf.logging.info("Starting a training cycle: {}/{}".format(
            cycle_index + 1, total_training_cycle))
        mlperf_helper.ncf_print(key=mlperf_helper.TAGS.TRAIN_EPOCH,
                                value=cycle_index)

        # Train the model
        if FLAGS.use_estimator:
            train_input_fn, train_record_dir, batch_count = \
                data_preprocessing.make_input_fn(
                    ncf_dataset=ncf_dataset, is_training=True)
            # A shard-count mismatch means the async generator diverged
            # from the step count computed above.
            if batch_count != num_train_steps:
                raise ValueError(
                    "Step counts do not match. ({} vs. {}) The async process is "
                    "producing incorrect shards.".format(batch_count, num_train_steps))

            train_estimator.train(input_fn=train_input_fn, hooks=train_hooks,
                                  steps=num_train_steps)
            if train_record_dir:
                tf.gfile.DeleteRecursively(train_record_dir)

            tf.logging.info("Beginning evaluation.")
            # Eval records are reusable, so the input_fn is built only once.
            if eval_input_fn is None:
                eval_input_fn, _, eval_batch_count = data_preprocessing.make_input_fn(
                    ncf_dataset=ncf_dataset, is_training=False)
                if eval_batch_count != num_eval_steps:
                    raise ValueError(
                        "Step counts do not match. ({} vs. {}) The async process is "
                        "producing incorrect shards.".format(
                            eval_batch_count, num_eval_steps))
            mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_START,
                                    value=cycle_index)
            eval_results = eval_estimator.evaluate(eval_input_fn,
                                                   steps=num_eval_steps)
            tf.logging.info("Evaluation complete.")
        else:
            runner.train()
            tf.logging.info("Beginning evaluation.")
            mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_START,
                                    value=cycle_index)
            eval_results = runner.eval()
            tf.logging.info("Evaluation complete.")

        hr = float(eval_results[rconst.HR_KEY])
        ndcg = float(eval_results[rconst.NDCG_KEY])

        mlperf_helper.ncf_print(
            key=mlperf_helper.TAGS.EVAL_TARGET,
            value={"epoch": cycle_index, "value": FLAGS.hr_threshold})
        mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_ACCURACY,
                                value={"epoch": cycle_index, "value": hr})
        mlperf_helper.ncf_print(
            key=mlperf_helper.TAGS.EVAL_HP_NUM_NEG,
            value={"epoch": cycle_index, "value": rconst.NUM_EVAL_NEGATIVES})

        # Logged by the async process during record creation.
        mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_HP_NUM_USERS,
                                deferred=True)
        mlperf_helper.ncf_print(key=mlperf_helper.TAGS.EVAL_STOP, value=cycle_index)

        # Benchmark the evaluation results
        benchmark_logger.log_evaluation_result(eval_results)
        # Log the HR and NDCG results.
        tf.logging.info(
            "Iteration {}: HR = {:.4f}, NDCG = {:.4f}".format(
                cycle_index + 1, hr, ndcg))

        # If some evaluation threshold is met
        if model_helpers.past_stop_threshold(FLAGS.hr_threshold, hr):
            target_reached = True
            break

    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_STOP,
                            value={"success": target_reached})
    cleanup_fn()  # Cleanup data construction artifacts and subprocess.

    # Clear the session explicitly to avoid session delete error
    tf.keras.backend.clear_session()
    mlperf_helper.ncf_print(key=mlperf_helper.TAGS.RUN_FINAL)
def define_ncf_flags():
    """Add flags for running ncf_main.

    Registers the shared flags from flags_core (base, performance, device,
    benchmark), then the NCF-specific model/hyperparameter flags, plus the
    flag validators used to reject inconsistent combinations.
    """
    # Add common flags
    flags_core.define_base(export_dir=False)
    flags_core.define_performance(
        num_parallel_calls=False,
        inter_op=False,
        intra_op=False,
        synthetic_data=True,
        max_train_steps=False,
        dtype=False,
        all_reduce_alg=False
    )
    flags_core.define_device(tpu=True)
    flags_core.define_benchmark()

    flags.adopt_module_key_flags(flags_core)

    flags_core.set_defaults(
        model_dir="/tmp/ncf/",
        data_dir="/tmp/movielens-data/",
        train_epochs=2,
        batch_size=256,
        hooks="ProfilerHook",
        tpu=None
    )

    # Add ncf-specific flags
    flags.DEFINE_enum(
        name="dataset", default="ml-1m",
        enum_values=["ml-1m", "ml-20m"], case_sensitive=False,
        help=flags_core.help_wrap(
            "Dataset to be trained and evaluated."))

    flags.DEFINE_boolean(
        name="download_if_missing", default=True, help=flags_core.help_wrap(
            "Download data to data_dir if it is not already present."))

    # NOTE: fixed missing separating spaces at the string-literal joins
    # (previously rendered e.g. "largerthan").
    flags.DEFINE_string(
        name="eval_batch_size", default=None, help=flags_core.help_wrap(
            "The batch size used for evaluation. This should generally be larger "
            "than the training batch size as the lack of back propagation during "
            "evaluation can allow for larger batch sizes to fit in memory. If not "
            "specified, the training batch size (--batch_size) will be used."))

    flags.DEFINE_integer(
        name="num_factors", default=8,
        help=flags_core.help_wrap("The Embedding size of MF model."))

    # Set the default as a list of strings to be consistent with input arguments
    flags.DEFINE_list(
        name="layers", default=["64", "32", "16", "8"],
        help=flags_core.help_wrap(
            "The sizes of hidden layers for MLP. Example "
            "to specify different sizes of MLP layers: --layers=32,16,8,4"))

    flags.DEFINE_float(
        name="mf_regularization", default=0.,
        help=flags_core.help_wrap(
            "The regularization factor for MF embeddings. The factor is used by "
            "regularizer which allows to apply penalties on layer parameters or "
            "layer activity during optimization."))

    flags.DEFINE_list(
        name="mlp_regularization", default=["0.", "0.", "0.", "0."],
        help=flags_core.help_wrap(
            "The regularization factor for each MLP layer. See mf_regularization "
            "help for more info about regularization factor."))

    flags.DEFINE_integer(
        name="num_neg", default=4,
        help=flags_core.help_wrap(
            "The Number of negative instances to pair with a positive instance."))

    flags.DEFINE_float(
        name="learning_rate", default=0.001,
        help=flags_core.help_wrap("The learning rate."))

    flags.DEFINE_float(
        name="beta1", default=0.9,
        help=flags_core.help_wrap("beta1 hyperparameter for the Adam optimizer."))

    flags.DEFINE_float(
        name="beta2", default=0.999,
        help=flags_core.help_wrap("beta2 hyperparameter for the Adam optimizer."))

    flags.DEFINE_float(
        name="epsilon", default=1e-8,
        help=flags_core.help_wrap("epsilon hyperparameter for the Adam "
                                  "optimizer."))

    flags.DEFINE_float(
        name="hr_threshold", default=None,
        help=flags_core.help_wrap(
            "If passed, training will stop when the evaluation metric HR is "
            "greater than or equal to hr_threshold. For dataset ml-1m, the "
            "desired hr_threshold is 0.68 which is the result from the paper; "
            "For dataset ml-20m, the threshold can be set as 0.95 which is "
            "achieved by MLPerf implementation."))

    # NOTE: fixed typo "soring" -> "sorting".
    flags.DEFINE_bool(
        name="ml_perf", default=False,
        help=flags_core.help_wrap(
            "If set, changes the behavior of the model slightly to match the "
            "MLPerf reference implementations here: \n"
            "https://github.com/mlperf/reference/tree/master/recommendation/"
            "pytorch\n"
            "The two changes are:\n"
            "1. When computing the HR and NDCG during evaluation, remove "
            "duplicate user-item pairs before the computation. This results in "
            "better HRs and NDCGs.\n"
            "2. Use a different sorting algorithm when sorting the input data, "
            "which performs better due to the fact the sorting algorithms are "
            "not stable."))

    flags.DEFINE_bool(
        name="output_ml_perf_compliance_logging", default=False,
        help=flags_core.help_wrap(
            "If set, output the MLPerf compliance logging. This is only useful "
            "if one is running the model for MLPerf. See "
            "https://github.com/mlperf/policies/blob/master/training_rules.adoc"
            "#submission-compliance-logs for details. This uses sudo and so may "
            "ask for your password, as root access is needed to clear the system "
            "caches, which is required for MLPerf compliance."
        )
    )

    flags.DEFINE_integer(
        name="seed", default=None, help=flags_core.help_wrap(
            "This value will be used to seed both NumPy and TensorFlow."))

    # NOTE: fixed grammar ("a random seed is does indeed" -> "does indeed").
    flags.DEFINE_bool(
        name="hash_pipeline", default=False, help=flags_core.help_wrap(
            "This flag will perform a separate run of the pipeline and hash "
            "batches as they are produced. \nNOTE: this will significantly slow "
            "training. However it is useful to confirm that a random seed "
            "does indeed make the data pipeline deterministic."))

    @flags.validator("eval_batch_size", "eval_batch_size must be at least {}"
                     .format(rconst.NUM_EVAL_NEGATIVES + 1))
    def eval_size_check(eval_batch_size):
        # None (unset) is allowed; otherwise the batch must fit one positive
        # plus NUM_EVAL_NEGATIVES negatives per user.
        return (eval_batch_size is None or
                int(eval_batch_size) > rconst.NUM_EVAL_NEGATIVES)

    flags.DEFINE_bool(
        name="use_subprocess", default=True, help=flags_core.help_wrap(
            "By default, ncf_main.py starts async data generation process as a "
            "subprocess. If set to False, ncf_main.py will assume the async data "
            "generation process has already been started by the user."))

    flags.DEFINE_integer(name="cache_id", default=None, help=flags_core.help_wrap(
        "Use a specified cache_id rather than using a timestamp. This is only "
        "needed to synchronize across multiple workers. Generally this flag will "
        "not need to be set."
    ))

    flags.DEFINE_bool(
        name="use_xla_for_gpu", default=False, help=flags_core.help_wrap(
            "If True, use XLA for the model function. Only works when using a "
            "GPU. On TPUs, XLA is always used"))

    xla_message = "--use_xla_for_gpu is incompatible with --tpu"
    @flags.multi_flags_validator(["use_xla_for_gpu", "tpu"], message=xla_message)
    def xla_validator(flag_dict):
        return not flag_dict["use_xla_for_gpu"] or not flag_dict["tpu"]

    flags.DEFINE_bool(
        name="use_estimator", default=True, help=flags_core.help_wrap(
            "If True, use Estimator to train. Setting to False is slightly "
            "faster, but when False, the following are currently unsupported:\n"
            "  * Using TPUs\n"
            "  * Using more than 1 GPU\n"
            "  * Reloading from checkpoints\n"
            "  * Any hooks specified with --hooks\n"))

    flags.DEFINE_bool(
        name="use_while_loop", default=None, help=flags_core.help_wrap(
            "If set, run an entire epoch in a session.run() call using a "
            "TensorFlow while loop. This can improve performance, but will not "
            "print out losses throughout the epoch. Requires "
            "--use_estimator=false"
        ))

    # Renamed from the (reused) xla_message local: this validator guards the
    # while-loop flag, not XLA.
    while_loop_message = "--use_while_loop requires --use_estimator=false"
    @flags.multi_flags_validator(["use_while_loop", "use_estimator"],
                                 message=while_loop_message)
    def while_loop_validator(flag_dict):
        return (not flag_dict["use_while_loop"] or
                not flag_dict["use_estimator"])
if __name__ == "__main__":
    # Script entry point: verbose logging, register all NCF flags, then
    # hand control to absl (which parses flags and calls main).
    tf.logging.set_verbosity(tf.logging.INFO)
    define_ncf_flags()
    absl_app.run(main)
| |
import sys, types, weakref
from collections import deque
from test.bootstrap import config
from test.lib.util import decorator, gc_collect
from sqlalchemy.util import callable
from sqlalchemy import event, pool
from sqlalchemy.engine import base as engine_base
import re
import warnings
class ConnectionKiller(object):
    """Tracks engines and DBAPI connections created during a test run so
    they can be rolled back, closed and disposed between tests.

    Uses ``except Exception as e`` (Python 2.6+/3.x compatible) instead of
    the Python-2-only ``except Exception, e`` form.
    """

    def __init__(self):
        # Weak references only: tracking must never keep an engine or a
        # connection proxy alive on its own.
        self.proxy_refs = weakref.WeakKeyDictionary()
        self.testing_engines = weakref.WeakKeyDictionary()
        self.conns = set()

    def add_engine(self, engine):
        """Register an engine for disposal at the end of the test context."""
        self.testing_engines[engine] = True

    def connect(self, dbapi_conn, con_record):
        """Pool 'connect' event hook: remember the raw DBAPI connection."""
        self.conns.add(dbapi_conn)

    def checkout(self, dbapi_con, con_record, con_proxy):
        """Pool 'checkout' event hook: remember the connection proxy."""
        self.proxy_refs[con_proxy] = True

    def _safe(self, fn):
        # Best-effort cleanup: a failure here must not abort the test run,
        # but should still be surfaced as a warning.
        try:
            fn()
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            warnings.warn(
                "testing_reaper couldn't "
                "rollback/close connection: %s" % e)

    def rollback_all(self):
        """Roll back every tracked connection proxy that is still valid."""
        for rec in self.proxy_refs.keys():
            if rec is not None and rec.is_valid:
                self._safe(rec.rollback)

    def close_all(self):
        """Close every tracked connection proxy."""
        for rec in self.proxy_refs.keys():
            if rec is not None:
                self._safe(rec._close)

    def _after_test_ctx(self):
        pass
        # this can cause a deadlock with pg8000 - pg8000 acquires
        # prepared statement lock inside of rollback() - if async gc
        # is collecting in finalize_fairy, deadlock.
        # not sure if this should be if pypy/jython only
        #for conn in self.conns:
        #    self._safe(conn.rollback)

    def _stop_test_ctx(self):
        # Choose teardown strategy based on the --low-connections option.
        if config.options.low_connections:
            self._stop_test_ctx_minimal()
        else:
            self._stop_test_ctx_aggressive()

    def _stop_test_ctx_minimal(self):
        """Teardown that leaves the primary testing.db engine intact."""
        from test.lib import testing
        self.close_all()
        self.conns = set()
        for rec in self.testing_engines.keys():
            if rec is not testing.db:
                rec.dispose()

    def _stop_test_ctx_aggressive(self):
        """Teardown that closes and disposes everything tracked."""
        self.close_all()
        for conn in self.conns:
            self._safe(conn.close)
        self.conns = set()
        for rec in self.testing_engines.keys():
            rec.dispose()

    def assert_all_closed(self):
        """Fail if any tracked connection proxy is still valid (open)."""
        for rec in self.proxy_refs:
            if rec.is_valid:
                assert False
# Module-wide reaper instance: the test framework registers engines and
# pool events against this object and uses it to clean up between tests.
testing_reaper = ConnectionKiller()
def drop_all_tables(metadata, bind):
    """Close outstanding test connections, then drop all tables in *metadata*."""
    testing_reaper.close_all()
    closer = getattr(bind, 'close', None)
    if closer is not None:
        closer()
    metadata.drop_all(bind)
@decorator
def assert_conns_closed(fn, *args, **kw):
    """Decorator that asserts all proxied connections are closed after fn."""
    try:
        fn(*args, **kw)
    finally:
        testing_reaper.assert_all_closed()
@decorator
def rollback_open_connections(fn, *args, **kw):
    """Decorator that rolls back all open connections after fn execution."""
    try:
        fn(*args, **kw)
    finally:
        # Runs even if fn raised, so leftover transactions never leak
        # into the next test.
        testing_reaper.rollback_all()
@decorator
def close_first(fn, *args, **kw):
    """Decorator that closes all connections before fn execution."""
    testing_reaper.close_all()
    fn(*args, **kw)
@decorator
def close_open_connections(fn, *args, **kw):
    """Decorator that closes all connections after fn execution."""
    try:
        fn(*args, **kw)
    finally:
        # Runs even if fn raised.
        testing_reaper.close_all()
def all_dialects(exclude=None):
    """Yield an instance of every available dialect, skipping *exclude* names."""
    import sqlalchemy.databases as d
    skip = exclude or ()
    for name in d.__all__:
        # TEMPORARY
        if name in skip:
            continue
        mod = getattr(d, name, None) or getattr(
            __import__('sqlalchemy.databases.%s' % name).databases, name)
        yield mod.dialect()
class ReconnectFixture(object):
    """Wraps a DBAPI module, recording the connections it hands out so
    they can all be forcibly closed to simulate a database restart.

    Uses ``except Exception as e`` (Python 2.6+/3.x compatible) instead of
    the Python-2-only ``except Exception, e`` form.
    """

    def __init__(self, dbapi):
        self.dbapi = dbapi
        self.connections = []

    def __getattr__(self, key):
        # Delegate everything not defined here to the real DBAPI module.
        return getattr(self.dbapi, key)

    def connect(self, *args, **kwargs):
        """Open a real connection and remember it for later shutdown()."""
        conn = self.dbapi.connect(*args, **kwargs)
        self.connections.append(conn)
        return conn

    def _safe(self, fn):
        # Best-effort close: warn instead of failing the test.
        try:
            fn()
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            warnings.warn(
                "ReconnectFixture couldn't "
                "close connection: %s" % e)

    def shutdown(self):
        """Close every recorded connection, simulating a DB restart."""
        # TODO: this doesn't cover all cases
        # as nicely as we'd like, namely MySQLdb.
        # would need to implement R. Brewer's
        # proxy server idea to get better
        # coverage.
        for c in list(self.connections):
            self._safe(c.close)
        self.connections = []
def reconnecting_engine(url=None, options=None):
    """Return a testing engine whose DBAPI layer can simulate disconnects."""
    url = url or config.db_url
    if not options:
        options = {}
    options['module'] = ReconnectFixture(config.db.dialect.dbapi)
    engine = testing_engine(url, options)
    real_dispose = engine.dispose

    def dispose():
        # Kill the simulated connections before the real teardown runs.
        engine.dialect.dbapi.shutdown()
        real_dispose()

    engine.test_shutdown = engine.dialect.dbapi.shutdown
    engine.dispose = dispose
    return engine
def testing_engine(url=None, options=None):
    """Produce an engine configured by --options with optional overrides."""
    from sqlalchemy import create_engine
    from test.lib.assertsql import asserter

    # 'use_reaper' is a fixture-only option; strip it before create_engine.
    use_reaper = options.pop('use_reaper', True) if options else True

    engine = create_engine(url or config.db_url, **(options or config.db_opts))
    if isinstance(engine.pool, pool.QueuePool):
        # Fail fast in tests instead of blocking on an exhausted pool.
        engine.pool._timeout = 0
        engine.pool._max_overflow = 0

    event.listen(engine, 'after_execute', asserter.execute)
    event.listen(engine, 'after_cursor_execute', asserter.cursor_execute)
    if use_reaper:
        event.listen(engine.pool, 'connect', testing_reaper.connect)
        event.listen(engine.pool, 'checkout', testing_reaper.checkout)
        testing_reaper.add_engine(engine)
    return engine
def utf8_engine(url=None, options=None):
    """Hook for dialects or drivers that don't handle utf8 by default."""
    from sqlalchemy.engine import url as engine_url

    needs_charset = (config.db.dialect.name == 'mysql' and
                     config.db.driver in ['mysqldb', 'pymysql'])
    if needs_charset:
        # note 1.2.1.gamma.6 or greater of MySQLdb needed here
        parsed = engine_url.make_url(url or config.db_url)
        parsed.query['charset'] = 'utf8'
        parsed.query['use_unicode'] = '0'
        url = str(parsed)

    return testing_engine(url, options)
def mock_engine(dialect_name=None):
    """Provides a mocking engine based on the current testing.db.

    This is normally used to test DDL generation flow as emitted
    by an Engine.

    It should not be used in other cases, as assert_compile() and
    assert_sql_execution() are much better choices with fewer
    moving parts.
    """
    from sqlalchemy import create_engine

    dialect_name = dialect_name or config.db.name
    buffer = []

    def executor(sql, *a, **kw):
        # Capture instead of executing.
        buffer.append(sql)

    def assert_sql(stmts):
        received = [re.sub(r'[\n\t]', '', str(s)) for s in buffer]
        assert received == stmts, received

    def print_sql():
        dialect = engine.dialect
        return "\n".join(str(s.compile(dialect=dialect)) for s in engine.mock)

    engine = create_engine(dialect_name + '://',
                           strategy='mock', executor=executor)
    assert not hasattr(engine, 'mock')
    engine.mock = buffer
    engine.assert_sql = assert_sql
    engine.print_sql = print_sql
    return engine
class DBAPIProxyCursor(object):
    """Proxy a DBAPI cursor.

    Tests can provide subclasses of this to intercept
    DBAPI-level cursor operations.
    """

    def __init__(self, engine, conn):
        self.engine = engine
        self.connection = conn
        self.cursor = conn.cursor()

    def execute(self, stmt, parameters=None, **kw):
        # Only pass parameters through when some were actually given.
        if not parameters:
            return self.cursor.execute(stmt, **kw)
        return self.cursor.execute(stmt, parameters, **kw)

    def executemany(self, stmt, params, **kw):
        return self.cursor.executemany(stmt, params, **kw)

    def __getattr__(self, key):
        # Everything else falls through to the real cursor.
        return getattr(self.cursor, key)
class DBAPIProxyConnection(object):
    """Proxy a DBAPI connection.

    Tests can provide subclasses of this to intercept
    DBAPI-level connection operations.
    """

    def __init__(self, engine, cursor_cls):
        # Acquire a real connection from the engine's pool creator;
        # _sqla_unwrap exposes it to SQLAlchemy internals.
        self.conn = self._sqla_unwrap = engine.pool._creator()
        self.engine = engine
        self.cursor_cls = cursor_cls

    def cursor(self):
        return self.cursor_cls(self.engine, self.conn)

    def close(self):
        self.conn.close()

    def __getattr__(self, key):
        # Everything else falls through to the real connection.
        return getattr(self.conn, key)
def proxying_engine(conn_cls=DBAPIProxyConnection, cursor_cls=DBAPIProxyCursor):
    """Produce an engine that provides proxy hooks for
    common methods.

    """
    make_conn = lambda: conn_cls(config.db, cursor_cls)
    return testing_engine(options={'creator': make_conn})
class ReplayableSession(object):
    """A simple record/playback tool.

    This is *not* a mock testing class. It only records a session for later
    playback and makes no assertions on call consistency whatsoever. It's
    unlikely to be suitable for anything other than DB-API recording.
    """

    # Buffer sentinels: Callable marks "a wrapped object was returned here",
    # NoAttribute marks "attribute access raised AttributeError".
    Callable = object()
    NoAttribute = object()

    # "Native" result types are stored in the buffer by value; anything else
    # (callables, methods, cursors...) is re-wrapped so its own use gets
    # recorded as well.
    # Py3K
    #Natives = set([getattr(types, t)
    #               for t in dir(types) if not t.startswith('_')]). \
    #               union([type(t) if not isinstance(t, type)
    #                      else t for t in __builtins__.values()]).\
    #               difference([getattr(types, t)
    #                        for t in ('FunctionType', 'BuiltinFunctionType',
    #                                  'MethodType', 'BuiltinMethodType',
    #                                  'LambdaType', )])
    # Py2K
    Natives = set([getattr(types, t)
                   for t in dir(types) if not t.startswith('_')]). \
                   difference([getattr(types, t)
                           for t in ('FunctionType', 'BuiltinFunctionType',
                                     'MethodType', 'BuiltinMethodType',
                                     'LambdaType', 'UnboundMethodType',)])
    # end Py2K

    def __init__(self):
        # Shared FIFO buffer: the Recorder appends, the Player pops.
        self.buffer = deque()

    def recorder(self, base):
        """Wrap *base* so every call/attribute result is recorded."""
        return self.Recorder(self.buffer, base)

    def player(self):
        """Return a stand-in that replays the recorded buffer in order."""
        return self.Player(self.buffer)

    class Recorder(object):
        def __init__(self, buffer, subject):
            self._buffer = buffer
            self._subject = subject

        def __call__(self, *args, **kw):
            # Invoke the real subject; record the result (or a Callable
            # sentinel if the result itself needs wrapping).
            subject, buffer = [object.__getattribute__(self, x)
                               for x in ('_subject', '_buffer')]

            result = subject(*args, **kw)
            if type(result) not in ReplayableSession.Natives:
                buffer.append(ReplayableSession.Callable)
                return type(self)(buffer, result)
            else:
                buffer.append(result)
                return result

        @property
        def _sqla_unwrap(self):
            # Expose the real object to SQLAlchemy internals.
            return self._subject

        def __getattribute__(self, key):
            # Real attributes of the Recorder itself (_buffer, _subject,
            # _sqla_unwrap, __call__) take priority.
            try:
                return object.__getattribute__(self, key)
            except AttributeError:
                pass

            subject, buffer = [object.__getattribute__(self, x)
                               for x in ('_subject', '_buffer')]
            try:
                result = type(subject).__getattribute__(subject, key)
            except AttributeError:
                # Record the failure so playback raises it too.
                buffer.append(ReplayableSession.NoAttribute)
                raise
            else:
                if type(result) not in ReplayableSession.Natives:
                    buffer.append(ReplayableSession.Callable)
                    return type(self)(buffer, result)
                else:
                    buffer.append(result)
                    return result

    class Player(object):
        def __init__(self, buffer):
            self._buffer = buffer

        def __call__(self, *args, **kw):
            # Replay the next recorded call result; a Callable sentinel
            # means the original returned a wrapped object, so return self.
            buffer = object.__getattribute__(self, '_buffer')
            result = buffer.popleft()
            if result is ReplayableSession.Callable:
                return self
            else:
                return result

        @property
        def _sqla_unwrap(self):
            # There is no real object during playback.
            return None

        def __getattribute__(self, key):
            try:
                return object.__getattribute__(self, key)
            except AttributeError:
                pass
            # Replay the recorded attribute access in order.
            buffer = object.__getattribute__(self, '_buffer')
            result = buffer.popleft()
            if result is ReplayableSession.Callable:
                return self
            elif result is ReplayableSession.NoAttribute:
                raise AttributeError(key)
            else:
                return result
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Doriancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from test_framework.test_framework import DoriancoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
# Expected mempool package limits exercised by this test: chains of up to
# this many unconfirmed ancestors/descendants should be accepted.
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(DoriancoinTestFramework):
    """Functional test for mempool ancestor/descendant package tracking."""

    def set_test_params(self):
        # Node 1 enforces a lower ancestor limit (5) than node 0's default,
        # so the two mempools diverge when long chains are submitted.
        self.num_nodes = 2
        self.extra_args = [["-maxorphantx=1000"], ["-maxorphantx=1000", "-limitancestorcount=5"]]

    # Build a transaction that spends parent_txid:vout
    # Return amount sent
    def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
        """Spend parent_txid:vout into num_outputs equal-value outputs.

        Returns (txid, per-output send value). Asserts that no change output
        was created, so each output's value is known exactly.
        """
        send_value = satoshi_round((value - fee)/num_outputs)
        inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
        outputs = {}
        for i in range(num_outputs):
            outputs[node.getnewaddress()] = send_value
        rawtx = node.createrawtransaction(inputs, outputs)
        signedtx = node.signrawtransaction(rawtx)
        txid = node.sendrawtransaction(signedtx['hex'])
        fulltx = node.getrawtransaction(txid, 1)
        assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
        return (txid, send_value)

    def run_test(self):
        """Exercise ancestor/descendant limits, fee accounting, and reorgs."""
        # Mine some blocks and have them mature.
        self.nodes[0].generate(101)
        utxo = self.nodes[0].listunspent(10)
        txid = utxo[0]['txid']
        vout = utxo[0]['vout']
        value = utxo[0]['amount']
        fee = Decimal("0.0001")
        # MAX_ANCESTORS transactions off a confirmed tx should be fine
        chain = []
        for i in range(MAX_ANCESTORS):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
            value = sent_value
            chain.append(txid)
        # Check mempool has MAX_ANCESTORS transactions in it, and descendant
        # count and fees should look correct
        mempool = self.nodes[0].getrawmempool(True)
        assert_equal(len(mempool), MAX_ANCESTORS)
        descendant_count = 1
        descendant_fees = 0
        descendant_size = 0
        descendants = []
        ancestors = list(chain)
        # Walk the chain from tip to root, accumulating expected descendant stats.
        for x in reversed(chain):
            # Check that getmempoolentry is consistent with getrawmempool
            entry = self.nodes[0].getmempoolentry(x)
            assert_equal(entry, mempool[x])
            # Check that the descendant calculations are correct
            assert_equal(mempool[x]['descendantcount'], descendant_count)
            descendant_fees += mempool[x]['fee']
            assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
            descendant_size += mempool[x]['size']
            assert_equal(mempool[x]['descendantsize'], descendant_size)
            descendant_count += 1
            # Check that getmempooldescendants is correct
            assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
            descendants.append(x)
            # Check that getmempoolancestors is correct
            ancestors.remove(x)
            assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
        # Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
        v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
        assert_equal(len(v_ancestors), len(chain)-1)
        for x in v_ancestors.keys():
            assert_equal(mempool[x], v_ancestors[x])
        assert(chain[-1] not in v_ancestors.keys())
        v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
        assert_equal(len(v_descendants), len(chain)-1)
        for x in v_descendants.keys():
            assert_equal(mempool[x], v_descendants[x])
        assert(chain[0] not in v_descendants.keys())
        # Check that ancestor modified fees includes fee deltas from
        # prioritisetransaction
        self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
        mempool = self.nodes[0].getrawmempool(True)
        ancestor_fees = 0
        for x in chain:
            ancestor_fees += mempool[x]['fee']
            assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)
        # Undo the prioritisetransaction for later tests
        self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
        # Check that descendant modified fees includes fee deltas from
        # prioritisetransaction
        self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
        mempool = self.nodes[0].getrawmempool(True)
        descendant_fees = 0
        for x in reversed(chain):
            descendant_fees += mempool[x]['fee']
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
        # Adding one more transaction on to the chain should fail.
        assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
        # Check that prioritising a tx before it's added to the mempool works
        # First clear the mempool by mining a block.
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        # Prioritise a transaction that has been mined, then add it back to the
        # mempool by using invalidateblock.
        self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Keep node1's tip synced with node0
        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
        # Now check that the transaction is in the mempool, with the right modified fee
        mempool = self.nodes[0].getrawmempool(True)
        descendant_fees = 0
        for x in reversed(chain):
            descendant_fees += mempool[x]['fee']
            if (x == chain[-1]):
                assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
            assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
        # TODO: check that node1's mempool is as expected
        # TODO: test ancestor size limits
        # Now test descendant chain limits
        txid = utxo[1]['txid']
        value = utxo[1]['amount']
        vout = utxo[1]['vout']
        transaction_package = []
        # First create one parent tx with 10 children
        (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
        parent_transaction = txid
        for i in range(10):
            transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
        # Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
        for i in range(MAX_DESCENDANTS - 1):
            utxo = transaction_package.pop(0)
            (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
            for j in range(10):
                transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
        mempool = self.nodes[0].getrawmempool(True)
        assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
        # Sending one more chained transaction will fail
        utxo = transaction_package.pop(0)
        assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
        # TODO: check that node1's mempool is as expected
        # TODO: test descendant size limits
        # Test reorg handling
        # First, the basics:
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
        self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
        # Now test the case where node1 has a transaction T in its mempool that
        # depends on transactions A and B which are in a mined block, and the
        # block containing A and B is disconnected, AND B is not accepted back
        # into node1's mempool because its ancestor count is too high.
        # Create 8 transactions, like so:
        # Tx0 -> Tx1 (vout0)
        #   \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
        #
        # Mine them in the next block, then generate a new tx8 that spends
        # Tx1 and Tx7, and add to node1's mempool, then disconnect the
        # last block.
        # Create tx0 with 2 outputs
        utxo = self.nodes[0].listunspent()
        txid = utxo[0]['txid']
        value = utxo[0]['amount']
        vout = utxo[0]['vout']
        send_value = satoshi_round((value - fee)/2)
        inputs = [ {'txid' : txid, 'vout' : vout} ]
        outputs = {}
        for i in range(2):
            outputs[self.nodes[0].getnewaddress()] = send_value
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        tx0_id = txid
        value = send_value
        # Create tx1
        tx1_id, _ = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
        # Create tx2-7
        vout = 1
        txid = tx0_id
        for i in range(6):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
            vout = 0
            value = sent_value
        # Mine these in a block
        self.nodes[0].generate(1)
        self.sync_all()
        # Now generate tx8, with a big fee
        inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
        outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        sync_mempools(self.nodes)
        # Now try to disconnect the tip on each node...
        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        sync_blocks(self.nodes)
if __name__ == '__main__':
    # Run the functional test standalone against a local regtest node setup.
    MempoolPackagesTest().main()
| |
import re
import os
import shutil
import time
import cloudpickle
from typing import List, Optional, Dict
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
from torch.optim import Adam, lr_scheduler
from torchtext.data.field import Field
from torchtext.data.iterator import Iterator
from qanta import qlogging
from qanta.util.io import shell, get_tmp_filename
from qanta.torch.dataset import QuizBowl
from qanta.config import conf
from qanta.guesser.abstract import AbstractGuesser
from qanta.datasets.abstract import QuestionText
from qanta.torch import (
BaseLogger,
TerminateOnNaN,
EarlyStopping,
ModelCheckpoint,
MaxEpochStopping,
TrainingManager,
)
# Module-level logger for this guesser.
log = qlogging.get(__name__)
# True when a CUDA device is visible; gates the .cuda() calls below.
CUDA = torch.cuda.is_available()
def create_save_model(model):
    """Build a checkpoint callback that writes *model*'s weights to a path."""
    def _persist(path):
        # Persist only the parameters (state dict), not the full module.
        torch.save(model.state_dict(), path)
    return _persist
# Boilerplate phrases ("for 10 points", "FTP", speaker cues) stripped from
# quiz bowl question text before guessing.
qb_patterns = {
    "\n",
    ", for 10 points,",
    ", for ten points,",
    "--for 10 points--",
    "for 10 points, ",
    "for 10 points--",
    "for ten points, ",
    "for 10 points ",
    "for ten points ",
    # BUG FIX: these two entries were fused into one string (", ftp,ftp,")
    # by implicit string concatenation -- a missing comma in the literal.
    ", ftp,",
    "ftp,",
    "ftp",
    "(*)",
}
# Alternation over all literal patterns, plus bracketed/parenthesized asides.
re_pattern = "|".join([re.escape(p) for p in qb_patterns])
re_pattern += r"|\[.*?\]|\(.*?\)"
class DanEncoder(nn.Module):
    """Feed-forward encoder: a stack of Linear -> BatchNorm -> ELU -> Dropout
    layers mapping pooled embeddings to hidden representations."""

    def __init__(self, embedding_dim, n_hidden_layers, n_hidden_units, dropout_prob):
        super(DanEncoder, self).__init__()
        layers = []
        in_features = embedding_dim
        for _ in range(n_hidden_layers):
            layers.extend([
                nn.Linear(in_features, n_hidden_units),
                nn.BatchNorm1d(n_hidden_units),
                nn.ELU(),
                nn.Dropout(dropout_prob),
            ])
            # Every layer after the first maps hidden -> hidden.
            in_features = n_hidden_units
        self.encoder = nn.Sequential(*layers)

    def forward(self, x_array):
        """Encode a (batch, embedding_dim) tensor into (batch, n_hidden_units)."""
        return self.encoder(x_array)
class DanModel(nn.Module):
    """Deep Averaging Network classifier.

    Embeds tokens from either a single combined text field OR any combination
    of unigram/bigram/trigram fields, pools each field's embeddings into one
    vector per example, concatenates the pooled vectors, and classifies via a
    feed-forward encoder. Passing both a text field and any ngram field is an
    error, as is passing no field at all.
    """

    def __init__(
        self,
        n_classes,
        *,
        text_field=None,
        unigram_field=None,
        bigram_field=None,
        trigram_field=None,
        init_embeddings=True,
        emb_dim=300,
        n_hidden_units=1000,
        n_hidden_layers=1,
        nn_dropout=0.265,
        pooling="avg",
    ):
        super(DanModel, self).__init__()
        self.emb_dim = emb_dim
        self.n_classes = n_classes
        self.n_hidden_units = n_hidden_units
        self.n_hidden_layers = n_hidden_layers
        self.nn_dropout = nn_dropout
        self.pooling = pooling
        self.dropout = nn.Dropout(nn_dropout)
        if (text_field is not None) and (
            unigram_field is not None
            or bigram_field is not None
            or trigram_field is not None
        ):
            raise ValueError(
                "Textfield being not None and any ngram field being not None is not allowed"
            )
        if (
            text_field is None
            and unigram_field is None
            and bigram_field is None
            and trigram_field is None
        ):
            raise ValueError("Must have at least one text field")
        if text_field is None:
            self.text_vocab_size = None
            self.text_embeddings = None
            self.text_field = None
        else:
            text_vocab = text_field.vocab
            self.text_vocab_size = len(text_vocab)
            text_pad_idx = text_vocab.stoi[text_field.pad_token]
            self.text_embeddings = nn.Embedding(
                self.text_vocab_size, emb_dim, padding_idx=text_pad_idx
            )
            self.text_field = text_field
            if init_embeddings:
                # Unknown tokens start at the mean of the pretrained vectors.
                # NOTE(review): .cuda() assumes a GPU is present -- confirm.
                mean_emb = text_vocab.vectors.mean(0)
                text_vocab.vectors[text_vocab.stoi[text_field.unk_token]] = mean_emb
                self.text_embeddings.weight.data = text_vocab.vectors.cuda()
        if unigram_field is None:
            self.unigram_vocab_size = None
            self.unigram_embeddings = None
            self.unigram_field = None
        else:
            unigram_vocab = unigram_field.vocab
            self.unigram_vocab_size = len(unigram_vocab)
            unigram_pad_idx = unigram_vocab.stoi[unigram_field.pad_token]
            self.unigram_embeddings = nn.Embedding(
                self.unigram_vocab_size, emb_dim, padding_idx=unigram_pad_idx
            )
            self.unigram_field = unigram_field
            if init_embeddings:
                mean_emb = unigram_vocab.vectors.mean(0)
                unigram_vocab.vectors[
                    unigram_vocab.stoi[unigram_field.unk_token]
                ] = mean_emb
                self.unigram_embeddings.weight.data = unigram_vocab.vectors.cuda()
        if bigram_field is None:
            self.bigram_vocab_size = None
            self.bigram_embeddings = None
            self.bigram_field = None
        else:
            # NOTE(review): unlike text/unigram, bigram/trigram embeddings are
            # never initialized from pretrained vectors -- presumably because
            # none exist for higher-order ngrams; confirm this is intentional.
            bigram_vocab = bigram_field.vocab
            self.bigram_vocab_size = len(bigram_vocab)
            bigram_pad_idx = bigram_vocab.stoi[bigram_field.pad_token]
            self.bigram_embeddings = nn.Embedding(
                self.bigram_vocab_size, emb_dim, padding_idx=bigram_pad_idx
            )
            self.bigram_field = bigram_field
        if trigram_field is None:
            self.trigram_vocab_size = None
            self.trigram_embeddings = None
            self.trigram_field = None
        else:
            trigram_vocab = trigram_field.vocab
            self.trigram_vocab_size = len(trigram_vocab)
            trigram_pad_idx = trigram_vocab.stoi[trigram_field.pad_token]
            self.trigram_embeddings = nn.Embedding(
                self.trigram_vocab_size, emb_dim, padding_idx=trigram_pad_idx
            )
            self.trigram_field = trigram_field
        # One pooled emb_dim vector per active field is concatenated, so the
        # encoder input width is n_fields * emb_dim.
        if text_field is not None:
            n_fields = 1
        else:
            n_fields = 0
            if unigram_field is not None:
                n_fields += 1
            if bigram_field is not None:
                n_fields += 1
            if trigram_field is not None:
                n_fields += 1
        self.encoder = DanEncoder(
            n_fields * emb_dim,
            self.n_hidden_layers,
            self.n_hidden_units,
            self.nn_dropout,
        )
        self.classifier = nn.Sequential(
            nn.Linear(self.n_hidden_units, n_classes),
            nn.BatchNorm1d(n_classes),
            nn.Dropout(self.nn_dropout),
        )

    def _pool(self, embed, lengths, batch_size):
        """Reduce (batch, seq, emb) to (batch, emb) by mean or max pooling.

        `lengths` must be a float tensor of per-example token counts; the avg
        branch divides the sum by it to ignore padding positions.
        """
        if self.pooling == "avg":
            return embed.sum(1) / lengths.view(batch_size, -1)
        elif self.pooling == "max":
            emb_max, _ = torch.max(embed, 1)
            return emb_max
        else:
            # BUG FIX: the message had a stray "f" before the placeholder
            # ("pooling type favg"); it now reports the actual value.
            raise ValueError(
                f"Unsupported pooling type {self.pooling}, only avg and max are supported"
            )

    def forward(self, input_: Dict[str, Variable], lengths: Dict, qanta_ids):
        """
        :param input_: [batch_size, seq_len] of word indices
        :param lengths: Length of each example
        :param qanta_ids: QB qanta_id if a qb question, otherwise -1 for wikipedia, used to get domain as source/target
        :return:
        """
        for key in lengths:
            if not isinstance(lengths[key], Variable):
                # NOTE(review): Variable(..., volatile=...) is pre-0.4 PyTorch
                # API; newer versions use torch.no_grad() -- confirm the
                # pinned torch version before upgrading.
                lengths[key] = Variable(
                    lengths[key].float(), volatile=not self.training
                )
        if self.text_field is not None:
            text_input = input_["text"]
            embed = self.text_embeddings(text_input)
            embed = self._pool(embed, lengths["text"].float(), text_input.size()[0])
            embed = self.dropout(embed)
            encoded = self.encoder(embed)
            return self.classifier(encoded)
        else:
            embedding_list = []
            if self.unigram_field is not None:
                unigram_input = input_["unigram"]
                embed = self.unigram_embeddings(unigram_input)
                # BUG FIX: was `lengths["unigram"].float` -- the bound method
                # itself, not its result -- which broke average pooling.
                embed = self._pool(
                    embed, lengths["unigram"].float(), unigram_input.size()[0]
                )
                embed = self.dropout(embed)
                embedding_list.append(embed)
            if self.bigram_field is not None:
                bigram_input = input_["bigram"]
                embed = self.bigram_embeddings(bigram_input)
                # BUG FIX: `.float` -> `.float()` (see unigram branch).
                embed = self._pool(
                    embed, lengths["bigram"].float(), bigram_input.size()[0]
                )
                embed = self.dropout(embed)
                embedding_list.append(embed)
            if self.trigram_field is not None:
                trigram_input = input_["trigram"]
                embed = self.trigram_embeddings(trigram_input)
                # BUG FIX: `.float` -> `.float()` (see unigram branch).
                embed = self._pool(
                    embed, lengths["trigram"].float(), trigram_input.size()[0]
                )
                embed = self.dropout(embed)
                embedding_list.append(embed)
            concat_embed = torch.cat(embedding_list, dim=1)
            encoded = self.encoder(concat_embed)
            return self.classifier(encoded)
class DanGuesser(AbstractGuesser):
    """Quiz Bowl guesser backed by a Deep Averaging Network (DanModel).

    Wires configuration, torchtext fields, training loop, batched inference,
    and (de)serialization around the underlying DanModel.
    """

    def __init__(self, config_num):
        super(DanGuesser, self).__init__(config_num)
        if self.config_num is not None:
            # Pull hyperparameters for this configuration from the global conf.
            guesser_conf = conf["guessers"]["qanta.guesser.dan.DanGuesser"][
                self.config_num
            ]
            self.gradient_clip = guesser_conf["gradient_clip"]
            self.n_hidden_units = guesser_conf["n_hidden_units"]
            self.n_hidden_layers = guesser_conf["n_hidden_layers"]
            self.nn_dropout = guesser_conf["nn_dropout"]
            self.batch_size = guesser_conf["batch_size"]
            self.use_wiki = guesser_conf["use_wiki"]
            self.n_wiki_sentences = guesser_conf["n_wiki_sentences"]
            self.wiki_title_replace_token = guesser_conf["wiki_title_replace_token"]
            self.lowercase = guesser_conf["lowercase"]
            self.combined_ngrams = guesser_conf["combined_ngrams"]
            self.unigrams = guesser_conf["unigrams"]
            self.bigrams = guesser_conf["bigrams"]
            self.trigrams = guesser_conf["trigrams"]
            self.combined_max_vocab_size = guesser_conf["combined_max_vocab_size"]
            self.unigram_max_vocab_size = guesser_conf["unigram_max_vocab_size"]
            self.bigram_max_vocab_size = guesser_conf["bigram_max_vocab_size"]
            self.trigram_max_vocab_size = guesser_conf["trigram_max_vocab_size"]
            self.pooling = guesser_conf["pooling"]
            self.random_seed = guesser_conf["random_seed"]
        # Torchtext fields and model state, populated by train() or load().
        self.page_field: Optional[Field] = None
        self.qanta_id_field: Optional[Field] = None
        self.text_field: Optional[Field] = None
        self.unigram_field: Optional[Field] = None
        self.bigram_field: Optional[Field] = None
        self.trigram_field: Optional[Field] = None
        self.n_classes = None
        self.emb_dim = None
        self.model_file = None
        self.model = None
        self.optimizer = None
        self.criterion = None
        self.scheduler = None

    @property
    def ans_to_i(self):
        # Mapping from answer page -> class index (torchtext vocab stoi).
        return self.page_field.vocab.stoi

    @property
    def i_to_ans(self):
        # Mapping from class index -> answer page (torchtext vocab itos).
        return self.page_field.vocab.itos

    def parameters(self):
        """Return this configuration's hyperparameter dict (for logging)."""
        return conf["guessers"]["qanta.guesser.dan.DanGuesser"][self.config_num]

    def train(self, training_data):
        """Build fields and model from the QuizBowl dataset, then train until
        the TrainingManager signals a stop (early stopping / NaN / max epochs).
        """
        log.info("Loading Quiz Bowl dataset")
        train_iter, val_iter, dev_iter = QuizBowl.iters(
            batch_size=self.batch_size,
            lower=self.lowercase,
            use_wiki=self.use_wiki,
            n_wiki_sentences=self.n_wiki_sentences,
            replace_title_mentions=self.wiki_title_replace_token,
            combined_ngrams=self.combined_ngrams,
            unigrams=self.unigrams,
            bigrams=self.bigrams,
            trigrams=self.trigrams,
            combined_max_vocab_size=self.combined_max_vocab_size,
            unigram_max_vocab_size=self.unigram_max_vocab_size,
            bigram_max_vocab_size=self.bigram_max_vocab_size,
            trigram_max_vocab_size=self.trigram_max_vocab_size,
        )
        log.info(f"N Train={len(train_iter.dataset.examples)}")
        log.info(f"N Test={len(val_iter.dataset.examples)}")
        fields: Dict[str, Field] = train_iter.dataset.fields
        self.page_field = fields["page"]
        self.n_classes = len(self.ans_to_i)
        self.qanta_id_field = fields["qanta_id"]
        self.emb_dim = 300
        # Only the fields the dataset actually produced are kept; the rest
        # stay None and DanModel skips them.
        if "text" in fields:
            self.text_field = fields["text"]
            log.info(f"Text Vocab={len(self.text_field.vocab)}")
        if "unigram" in fields:
            self.unigram_field = fields["unigram"]
            log.info(f"Unigram Vocab={len(self.unigram_field.vocab)}")
        if "bigram" in fields:
            self.bigram_field = fields["bigram"]
            log.info(f"Bigram Vocab={len(self.bigram_field.vocab)}")
        if "trigram" in fields:
            self.trigram_field = fields["trigram"]
            log.info(f"Trigram Vocab={len(self.trigram_field.vocab)}")
        log.info("Initializing Model")
        self.model = DanModel(
            self.n_classes,
            text_field=self.text_field,
            unigram_field=self.unigram_field,
            bigram_field=self.bigram_field,
            trigram_field=self.trigram_field,
            emb_dim=self.emb_dim,
            n_hidden_units=self.n_hidden_units,
            n_hidden_layers=self.n_hidden_layers,
            nn_dropout=self.nn_dropout,
            pooling=self.pooling,
        )
        if CUDA:
            self.model = self.model.cuda()
        log.info(f"Parameters:\n{self.parameters()}")
        log.info(f"Model:\n{self.model}")
        self.optimizer = Adam(self.model.parameters())
        self.criterion = nn.CrossEntropyLoss()
        # Reduce LR when validation accuracy plateaus (mode="max").
        self.scheduler = lr_scheduler.ReduceLROnPlateau(
            self.optimizer, patience=5, verbose=True, mode="max"
        )
        # Checkpoints go to a temp file; save() moves them to the output dir.
        temp_prefix = get_tmp_filename()
        self.model_file = f"{temp_prefix}.pt"
        manager = TrainingManager(
            [
                BaseLogger(log_func=log.info),
                TerminateOnNaN(),
                EarlyStopping(monitor="test_acc", patience=10, verbose=1),
                MaxEpochStopping(100),
                ModelCheckpoint(
                    create_save_model(self.model), self.model_file, monitor="test_acc"
                ),
            ]
        )
        log.info("Starting training")
        epoch = 0
        while True:
            self.model.train()
            train_acc, train_loss, train_time = self.run_epoch(train_iter)
            self.model.eval()
            test_acc, test_loss, test_time = self.run_epoch(val_iter)
            stop_training, reasons = manager.instruct(
                train_time, train_loss, train_acc, test_time, test_loss, test_acc
            )
            if stop_training:
                log.info(" ".join(reasons))
                break
            else:
                self.scheduler.step(test_acc)
            epoch += 1

    def run_epoch(self, iterator: Iterator):
        """Run one pass over `iterator`; backprop only when iterator.train.

        Returns (mean accuracy, mean loss, wall-clock seconds).
        """
        is_train = iterator.train
        batch_accuracies = []
        batch_losses = []
        epoch_start = time.time()
        for batch in iterator:
            # Assemble per-field inputs/lengths matching DanModel.forward.
            input_dict = {}
            lengths_dict = {}
            if hasattr(batch, "text"):
                text, lengths = batch.text
                input_dict["text"] = text
                lengths_dict["text"] = lengths
            if hasattr(batch, "unigram"):
                text, lengths = batch.unigram
                input_dict["unigram"] = text
                lengths_dict["unigram"] = lengths
            if hasattr(batch, "bigram"):
                text, lengths = batch.bigram
                input_dict["bigram"] = text
                lengths_dict["bigram"] = lengths
            if hasattr(batch, "trigram"):
                text, lengths = batch.trigram
                input_dict["trigram"] = text
                lengths_dict["trigram"] = lengths
            page = batch.page
            # NOTE(review): unconditionally moves to GPU (ignores the CUDA
            # flag used elsewhere) -- confirm this code only runs with CUDA.
            qanta_ids = batch.qanta_id.cuda()
            if is_train:
                self.model.zero_grad()
            out = self.model(input_dict, lengths_dict, qanta_ids)
            _, preds = torch.max(out, 1)
            accuracy = torch.mean(torch.eq(preds, page).float()).cpu().data
            batch_loss = self.criterion(out, page)
            if is_train:
                batch_loss.backward()
                # NOTE(review): clip_grad_norm is the deprecated (pre-0.4)
                # spelling of clip_grad_norm_ -- tied to the pinned torch.
                torch.nn.utils.clip_grad_norm(
                    self.model.parameters(), self.gradient_clip
                )
                self.optimizer.step()
            batch_accuracies.append(accuracy)
            batch_losses.append(batch_loss.cpu().data)
        epoch_end = time.time()
        return np.mean(batch_accuracies), np.mean(batch_losses), epoch_end - epoch_start

    def guess(self, questions: List[QuestionText], max_n_guesses: Optional[int]):
        """Return top guesses for each question, chunking into batches of 500."""
        if len(questions) == 0:
            return []
        batch_size = 500
        if len(questions) < batch_size:
            return self._guess_batch(questions, max_n_guesses)
        else:
            all_guesses = []
            for i in range(0, len(questions), batch_size):
                batch_questions = questions[i : i + batch_size]
                guesses = self._guess_batch(batch_questions, max_n_guesses)
                all_guesses.extend(guesses)
            return all_guesses

    def _guess_batch(self, questions: List[QuestionText], max_n_guesses: Optional[int]):
        """Score one batch of raw question strings.

        Returns, per question, a list of (answer, probability) pairs sorted
        by descending probability, truncated to max_n_guesses.
        """
        if len(questions) == 0:
            return []
        input_dict = {}
        lengths_dict = {}
        # Preprocess/numericalize with whichever fields the model was trained on.
        if self.text_field is not None:
            examples = [self.text_field.preprocess(q) for q in questions]
            text, lengths = self.text_field.process(examples, None, False)
            input_dict["text"] = text
            lengths_dict["text"] = lengths
        if self.unigram_field is not None:
            examples = [self.unigram_field.preprocess(q) for q in questions]
            text, lengths = self.unigram_field.process(examples, None, False)
            input_dict["unigram"] = text
            lengths_dict["unigram"] = lengths
        if self.bigram_field is not None:
            examples = [self.bigram_field.preprocess(q) for q in questions]
            text, lengths = self.bigram_field.process(examples, None, False)
            input_dict["bigram"] = text
            lengths_dict["bigram"] = lengths
        if self.trigram_field is not None:
            examples = [self.trigram_field.preprocess(q) for q in questions]
            text, lengths = self.trigram_field.process(examples, None, False)
            input_dict["trigram"] = text
            lengths_dict["trigram"] = lengths
        # Dummy qanta ids (0) -- inference does not use the domain signal.
        # NOTE(review): .cuda() assumes a GPU is present -- confirm.
        qanta_ids = self.qanta_id_field.process([0 for _ in questions]).cuda()
        guesses = []
        out = self.model(input_dict, lengths_dict, qanta_ids)
        probs = F.softmax(out).data.cpu().numpy()
        n_examples = probs.shape[0]
        preds = np.argsort(-probs, axis=1)
        for i in range(n_examples):
            guesses.append([])
            for p in preds[i][:max_n_guesses]:
                guesses[-1].append((self.i_to_ans[p], probs[i][p]))
        return guesses

    def save(self, directory: str):
        """Move the checkpointed weights and pickle all fields/hparams into
        `directory` (dan.pt + dan.pkl)."""
        shutil.copyfile(self.model_file, os.path.join(directory, "dan.pt"))
        shell(f"rm -f {self.model_file}")
        with open(os.path.join(directory, "dan.pkl"), "wb") as f:
            cloudpickle.dump(
                {
                    "page_field": self.page_field,
                    "combined_text_field": self.text_field,
                    "unigram_text_field": self.unigram_field,
                    "bigram_text_field": self.bigram_field,
                    "trigram_text_field": self.trigram_field,
                    "combined_ngrams": self.combined_ngrams,
                    "unigrams": self.unigrams,
                    "bigrams": self.bigrams,
                    "trigrams": self.trigrams,
                    "combined_max_vocab_size": self.combined_max_vocab_size,
                    "unigram_max_vocab_size": self.unigram_max_vocab_size,
                    "bigram_max_vocab_size": self.bigram_max_vocab_size,
                    "trigram_max_vocab_size": self.trigram_max_vocab_size,
                    "qanta_id_field": self.qanta_id_field,
                    "n_classes": self.n_classes,
                    "gradient_clip": self.gradient_clip,
                    "n_hidden_units": self.n_hidden_units,
                    "n_hidden_layers": self.n_hidden_layers,
                    "nn_dropout": self.nn_dropout,
                    "batch_size": self.batch_size,
                    "use_wiki": self.use_wiki,
                    "n_wiki_sentences": self.n_wiki_sentences,
                    "wiki_title_replace_token": self.wiki_title_replace_token,
                    "lowercase": self.lowercase,
                    "pooling": self.pooling,
                    "random_seed": self.random_seed,
                    "config_num": self.config_num,
                },
                f,
            )

    @classmethod
    def load(cls, directory: str):
        """Rebuild a guesser from save(): unpickle fields/hparams, construct
        the model, and load the checkpointed weights onto CPU."""
        with open(os.path.join(directory, "dan.pkl"), "rb") as f:
            params = cloudpickle.load(f)
            guesser = DanGuesser(params["config_num"])
            guesser.page_field = params["page_field"]
            guesser.qanta_id_field = params["qanta_id_field"]
            guesser.text_field = params["combined_text_field"]
            guesser.unigram_field = params["unigram_text_field"]
            guesser.bigram_field = params["bigram_text_field"]
            guesser.trigram_field = params["trigram_text_field"]
            guesser.combined_ngrams = params["combined_ngrams"]
            guesser.unigrams = params["unigrams"]
            guesser.bigrams = params["bigrams"]
            guesser.trigrams = params["trigrams"]
            guesser.combined_max_vocab_size = params["combined_max_vocab_size"]
            guesser.unigram_max_vocab_size = params["unigram_max_vocab_size"]
            guesser.bigram_max_vocab_size = params["bigram_max_vocab_size"]
            guesser.trigram_max_vocab_size = params["trigram_max_vocab_size"]
            guesser.n_classes = params["n_classes"]
            guesser.gradient_clip = params["gradient_clip"]
            guesser.n_hidden_units = params["n_hidden_units"]
            guesser.n_hidden_layers = params["n_hidden_layers"]
            guesser.nn_dropout = params["nn_dropout"]
            guesser.use_wiki = params["use_wiki"]
            guesser.n_wiki_sentences = params["n_wiki_sentences"]
            guesser.wiki_title_replace_token = params["wiki_title_replace_token"]
            guesser.lowercase = params["lowercase"]
            guesser.pooling = params["pooling"]
            guesser.random_seed = params["random_seed"]
            # NOTE(review): nn_dropout is not forwarded here, so the model is
            # built with the default dropout; harmless for eval() inference
            # but worth confirming if the loaded model is ever re-trained.
            guesser.model = DanModel(
                guesser.n_classes,
                text_field=guesser.text_field,
                unigram_field=guesser.unigram_field,
                bigram_field=guesser.bigram_field,
                trigram_field=guesser.trigram_field,
                init_embeddings=False,
                emb_dim=300,
                n_hidden_layers=guesser.n_hidden_layers,
                n_hidden_units=guesser.n_hidden_units,
                pooling=guesser.pooling,
            )
            guesser.model.load_state_dict(
                torch.load(
                    os.path.join(directory, "dan.pt"),
                    map_location=lambda storage, loc: storage,
                )
            )
            guesser.model.eval()
            if CUDA:
                guesser.model = guesser.model.cuda()
            return guesser

    @classmethod
    def targets(cls):
        """Filenames produced by save() that must exist for load()."""
        return ["dan.pt", "dan.pkl"]
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    # Round-trip through the json encoder the way RPC values travel.
    roundtrip = json.loads(json.dumps(float(amount)))
    if int(roundtrip * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSD, ...) uses the dotfile convention.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    # Python 2 module name; this script predates Python 3 (see iteritems below).
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        # ConfigParser requires at least one [section] header, but
        # bitcoin.conf has none -- this wrapper injects a fake "[all]"
        # header ahead of the real file contents.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Emit the fake header exactly once, then fall through
                # to the underlying file.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Strip trailing '#' comments so values parse cleanly.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    Builds the connection URL from rpcuser/rpcpassword/rpcport in *config*
    (defaulting the port from the testnet flag) and verifies the server's
    testnet setting matches. Exits the process (status 1) on connection
    failure or testnet mismatch.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 21101 if testnet else 4869
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except SystemExit:
        # BUG FIX: the old bare "except:" also caught the deliberate
        # sys.exit(1) above and printed a misleading connection error.
        raise
    except Exception:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Unlock the wallet if needed, prompting for a passphrase.

    Returns True when the wallet is unencrypted or is (still) unlocked.
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # BUG FIX: narrowed from a bare "except:", which also caught
            # KeyboardInterrupt from getpass and reported Ctrl-C as a
            # wrong passphrase instead of aborting.
            sys.stderr.write("Wrong passphrase\n")
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Group the node's unspent outputs by address.

    Returns {address: {"total": value, "outputs": [unspent...], "account": name}},
    skipping any output that is not pay-to-pubkey-hash or pay-to-script-hash.
    """
    address_to_account = {}
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    address_summary = {}
    for output in bitcoind.listunspent(0):
        # listunspent doesn't give addresses, so fetch the raw transaction:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # Only ordinary pay-to-address / pay-to-script-hash outputs are
        # handled; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue
        address = pk["addresses"][0]
        entry = address_summary.get(address)
        if entry is None:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
            }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick inputs (in order) until their sum covers *needed*.

    Returns (selected {txid, vout} dicts, leftover = gathered - needed);
    leftover is negative when the inputs cannot cover the amount.
    """
    # Feel free to improve this, this is good enough for my simple needs:
    chosen = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        chosen.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (chosen, gathered-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction paying *amount* (+ *fee*) from
    *fromaddresses* to *toaddress*, sending change back to the last
    from-address. Exits (status 1) on insufficient funds or signing failure.
    Returns the signed transaction as hex.
    """
    all_coins = list_available(bitcoind)
    needed = amount+fee
    total_available = Decimal("0.0")
    potential_inputs = []
    for addr in fromaddresses:
        if addr in all_coins:
            potential_inputs.extend(all_coins[addr]["outputs"])
            total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        # Merge with the payment output if change goes to the same address.
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    return signed_rawtx["hex"]
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of the previous outputs spent by txinfo's inputs."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # Look up the funding transaction to read the spent output's value.
        prev_tx = bitcoind.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    # Abort (sys.exit(1)) unless the decoded transaction's implied fee looks
    # sane: the fee must not exceed max_fee, and "free" transactions (fee
    # below BASE_FEE) are rejected when large or tiny-amount, mirroring the
    # reference client's relay rules.
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # The fee is whatever input value is not claimed by an output.
        # BUGFIX: the original referenced an undefined name `fee` in the two
        # no-fee checks below, raising NameError (uncaught -- the handler
        # only catches FeeError) as soon as either check was reached.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex)//2  # two hex characters per byte
        kb = tx_size//1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    # CLI entry point.  With no --amount, lists spendable balances per
    # address; otherwise builds, fee-checks and (unless --dry_run)
    # broadcasts a transaction.  NOTE: this file is Python 2 only
    # (dict.iteritems below).
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # List mode: one line per address with balance and account label.
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        # Send mode.  NOTE(review): --from is required here; omitting it
        # crashes with AttributeError on None.split -- confirm intended.
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Reject if the fee exceeds 1% of the amount being sent.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
| |
# Public API of this module when star-imported.
__all__ = ["make_node", "CM_Node"]
import bpy
import bpy.types
import bpy_types
import bmesh
import bmesh.ops
import math
import mathutils
import pyconspack as cpk
from array import array
from pyconspack import Conspack
from mathutils import Matrix
import io_scene_consmodel.consmodel as consmodel
from io_scene_consmodel.util import (matrix_to_vec, AttrPack, defencode)
# Nodes
class CM_Node(AttrPack):
    # Base exporter node: captures name, local transform and children of a
    # Blender scene or object.  Works by duck typing: scenes expose
    # `.objects`, objects expose `.children`.
    def preinit(self, ob=None, **kw):
        # `ob` is None when the node is being decoded rather than built
        # from a Blender datablock; leave all attributes unset in that case.
        if(not ob):
            return

        self.name = ob.name
        # NOTE(review): when `ob` has no matrix_local (e.g. a Scene) this
        # stores False rather than a matrix -- confirm consumers handle it.
        self.transform = (hasattr(ob, 'matrix_local') and
                          matrix_to_vec(ob.matrix_local))

        vals = ()
        if(hasattr(ob, 'children')):
            vals = ob.children
        elif(hasattr(ob, 'objects')):
            vals = ob.objects

        if(vals):
            self.children = cpk.Vector()
            for val in vals:
                # Scene.objects is flat, so only take roots (or our own
                # direct children) to avoid duplicating nested objects.
                if(not val.parent or val.parent == ob):
                    self.children.append(make_node(val))
def best_integer_type(i):
    """Smallest unsigned array typecode ('B', 'H' or 'I') able to hold i."""
    if i < 2**8:
        return 'B'
    if i < 2**16:
        return 'H'
    return 'I'
def int_array(a):
    # Pack an index sequence into the smallest unsigned typecode.
    # NOTE(review): sized from len(a); assumes the *values* in `a` never
    # exceed the element count -- holds for triangulated face indices,
    # where every vertex appears in at least one face.
    return array(best_integer_type(len(a)), a)
class CM_Mesh(CM_Node):
    # Exports a Blender mesh object as triangles with per-vertex and
    # per-face normals plus its materials, memoized per mesh datablock.
    def preinit(self, ob=None, **kw):
        super().preinit(ob, **kw)

        self.primitive_type = cpk.keyword('triangle')
        self.faces = array('I')
        self.vertices = array('f')
        self.normals = array('f')
        self.materials = cpk.Vector()
        self.face_normals = cpk.Vector()

        if(ob):
            if(ob.data in Cache.MESH_CACHE):
                # Datablock already exported (instanced mesh): share arrays.
                self.faces, self.normals, self.face_normals, self.vertices, self.materials = Cache.MESH_CACHE[ob.data]
            else:
                bm = bmesh.new()
                bm.from_mesh(ob.data)
                bmesh.ops.triangulate(bm, faces=bm.faces)

                for v in bm.verts:
                    self.vertices.extend(v.co.xyz)
                    self.normals.extend(v.normal)

                for f in bm.faces:
                    self.faces.extend((v.index for v in f.verts))
                    # Face normals are appended after the vertex normals in
                    # the same flat array; fni is this face normal's index.
                    self.normals.extend(f.normal)
                    fni = math.floor(len(self.normals)/3)-1
                    if(f.smooth):
                        # Smooth shading: corners reuse per-vertex normals.
                        self.face_normals.extend((v.index for v in f.verts))
                    else:
                        # Flat shading: all three corners use the face normal.
                        self.face_normals.extend((fni, fni, fni))

                # Shrink index arrays to the smallest fitting typecode.
                self.faces = int_array(self.faces)
                self.face_normals = int_array(self.face_normals)
                bm.free()

                for slot in ob.material_slots:
                    if(slot.material in Cache.MAT_CACHE):
                        mat = Cache.MAT_CACHE[slot.material]
                    else:
                        mat = CM_Material(ob=slot.material)
                    self.materials.append(mat)
                Cache.MESH_CACHE[ob.data] = (self.faces, self.normals, self.face_normals, self.vertices, self.materials)
class CM_Camera(CM_Node):
    """Exporter node for a Blender camera: lens angle, clip range, aspect."""
    def preinit(self, ob=None, **kw):
        super().preinit(ob, **kw)
        cam = ob.data
        self.fov = cam.angle
        self.clip_near = cam.clip_start
        self.clip_far = cam.clip_end
        self.aspect = cam.sensor_width / cam.sensor_height
class CM_LightPoint(CM_Node):
    # Exports a Blender point lamp: position, diffuse/specular color scaled
    # by energy, and the falloff mode mapped onto the classic
    # constant/linear/quadratic attenuation model.
    def preinit(self, ob=None, **kw):
        super().preinit(ob, **kw)

        self.position = array('f', ob.location)
        self.diffuse = array('f', (0, 0, 0))
        self.specular = array('f', (0, 0, 0))

        if(ob.data.use_diffuse):
            self.diffuse = array('f', ob.data.energy * ob.data.color)
        if(ob.data.use_specular):
            self.specular = array('f', ob.data.energy * ob.data.color)

        # Defaults: no distance attenuation.
        self.attenuation_constant = 1.0
        self.attenuation_linear = 0.0
        self.attenuation_quadratic = 0.0

        if(ob.data.falloff_type == 'CONSTANT'):
            # NOTE(review): stores the lamp distance in the constant term,
            # unlike the reciprocal forms below -- confirm intended.
            self.attenuation_constant = ob.data.distance
        elif(ob.data.falloff_type == 'INVERSE_LINEAR'):
            self.attenuation_linear = 1/ob.data.distance
        elif(ob.data.falloff_type == 'INVERSE_SQUARE'):
            self.attenuation_quadratic = 1/(ob.data.distance**2)
        elif(ob.data.falloff_type == 'LINEAR_QUADRATIC_WEIGHTED'):
            self.attenuation_linear = 1/(ob.data.linear_attenuation * ob.data.distance)
            self.attenuation_quadratic = 1/((ob.data.quadratic_attenuation * ob.data.distance)**2)
class CM_Material(AttrPack):
    # Exports a Blender material into a flat name/value map, converting
    # Blender's specular hardness into a Blinn roughness value.
    def preinit(self, ob=None, **kw):
        self.name = ""
        self.values = v = dict()
        m = ob
        # NOTE(review): reads the scene's world from a module-level global
        # set elsewhere by the exporter -- assumes an export is in progress.
        world = consmodel.Consmodel.SCENE.world

        if(ob):
            self.name = m.name
            v['alpha'] = m.alpha
            v['ambient'] = array('f', world.ambient_color * m.ambient)
            v['diffuse'] = array('f', m.diffuse_color * m.diffuse_intensity)

            # This was taken from the Blinn specular code in shadeoutput.c
            roughness = m.specular_hardness * m.specular_intensity
            if(roughness < 0.00001):
                roughness = 0.0
            elif(roughness < 100.0):
                roughness = math.sqrt(1.0/roughness)
            else:
                roughness = math.sqrt(100.0/roughness)

            v['roughness'] = roughness

            specular = list(m.specular_color * m.specular_alpha)
            v['specular'] = array('f', specular)
            v['specular-ior'] = m.specular_ior
            # Memoize so shared materials encode as one object.
            Cache.MAT_CACHE[ob] = self
# make_node
class Cache:
    # Module-wide memoization tables, keyed on Blender datablocks:
    CACHE = dict()       # scene/object -> CM_* node (see make_node)
    MESH_CACHE = dict()  # mesh datablock -> (faces, normals, face_normals, vertices, materials)
    MAT_CACHE = dict()   # material -> CM_Material
def make_node(bval):
    # Convert a Blender scene or object to the matching CM_* exporter node,
    # memoized in Cache.CACHE so shared/instanced data is exported once.
    if(bval in Cache.CACHE):
        return Cache.CACHE[bval]

    if(isinstance(bval, bpy.types.Scene)):
        ob = CM_Node(ob=bval)
    elif(isinstance(bval, bpy_types.Object)):
        if(bval.type == 'MESH'):
            ob = CM_Mesh(ob=bval)
        elif(bval.type == 'CAMERA'):
            ob = CM_Camera(ob=bval)
        elif(bval.type == 'LAMP' and bval.data.type == 'POINT'):
            ob = CM_LightPoint(ob=bval)
        else:
            # Fallback for other object types (empties, non-point lamps...):
            # plain transform/children node.
            ob = CM_Node(ob=bval)

    # NOTE(review): `ob` is unbound if bval is neither a Scene nor an
    # Object; callers only pass those two, otherwise this raises NameError.
    Cache.CACHE[bval] = ob
    return ob
def clear_cache():
    """Drop all memoized nodes, meshes and materials (fresh export state).

    Rebinds the tables rather than clearing them in place, so previously
    shared references keep their contents.
    """
    Cache.CACHE = {}
    Cache.MESH_CACHE = {}
    Cache.MAT_CACHE = {}
# Conspack regs
# Register each exporter class with conspack under its wire-format tag so
# encoded scenes can round-trip (defencode comes from io_scene_consmodel.util).
defencode(CM_Node, "node")
defencode(CM_Mesh, "mesh")
defencode(CM_Camera, "camera")
defencode(CM_LightPoint, "light-point")
defencode(CM_Material, "material-simple")
| |
import hashlib, binascii, struct, array, os, time, sys, optparse
import scrypt
from construct import *
def main():
    # Genesis-block generator driver: build the coinbase transaction, derive
    # the merkle root, assemble the 80-byte header, then brute-force a nonce
    # whose PoW hash meets the target.  Python 2 only.
    options = get_args()
    algorithm = get_algorithm(options)
    input_script  = create_input_script(options.timestamp)
    output_script = create_output_script(options.pubkey)
    # hash merkle root is the double sha256 hash of the transaction(s)
    tx = create_transaction(input_script, output_script,options)
    hash_merkle_root = hashlib.sha256(hashlib.sha256(tx).digest()).digest()
    print_block_info(options, hash_merkle_root)

    block_header = create_block_header(hash_merkle_root, options.time, options.bits, options.nonce)
    genesis_hash, nonce = generate_hash(block_header, algorithm, options.nonce, options.bits)
    announce_found_genesis(genesis_hash, nonce)
def get_args():
    # Parse the command line; when --bits is omitted, default to the
    # conventional difficulty-1 compact target for the chosen algorithm
    # (scrypt/X11-family coins use a lower target than SHA256).
    parser = optparse.OptionParser()
    parser.add_option("-t", "--time", dest="time", default=int(time.time()),
                      type="int", help="the (unix) time when the genesisblock is created")
    parser.add_option("-z", "--timestamp", dest="timestamp", default="The Times 03/Jan/2009 Chancellor on brink of second bailout for banks",
                      type="string", help="the pszTimestamp found in the coinbase of the genesisblock")
    parser.add_option("-n", "--nonce", dest="nonce", default=0,
                      type="int", help="the first value of the nonce that will be incremented when searching the genesis hash")
    parser.add_option("-a", "--algorithm", dest="algorithm", default="SHA256",
                      help="the PoW algorithm: [SHA256|scrypt|X11|X13|X15]")
    parser.add_option("-p", "--pubkey", dest="pubkey", default="04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f",
                      type="string", help="the pubkey found in the output script")
    parser.add_option("-v", "--value", dest="value", default=5000000000,
                      type="int", help="the value in coins for the output, full value (exp. in bitcoin 5000000000 - To get other coins value: Block Value * 100000000)")
    parser.add_option("-b", "--bits", dest="bits",
                      type="int", help="the target in compact representation, associated to a difficulty of 1")

    (options, args) = parser.parse_args()
    if not options.bits:
        if options.algorithm == "scrypt" or options.algorithm == "X11" or options.algorithm == "X13" or options.algorithm == "X15":
            options.bits = 0x1e0ffff0
        else:
            options.bits = 0x1d00ffff
    return options
def get_algorithm(options):
    """Validate and return options.algorithm; exit with an error otherwise."""
    supported_algorithms = ["SHA256", "scrypt", "X11", "X13", "X15"]
    if options.algorithm not in supported_algorithms:
        sys.exit("Error: Given algorithm must be one of: " + str(supported_algorithms))
    return options.algorithm
def create_input_script(psz_timestamp):
    # Build the coinbase input script: compact nBits (04ffff001d), the
    # one-byte extranonce push (0104), then the timestamp string as a push.
    # Python 2 only: uses the str 'hex' codec.
    psz_prefix = ""
    #use OP_PUSHDATA1 if required
    if len(psz_timestamp) > 76: psz_prefix = '4c'

    script_prefix = '04ffff001d0104' + psz_prefix + chr(len(psz_timestamp)).encode('hex')
    print (script_prefix + psz_timestamp.encode('hex'))
    return (script_prefix + psz_timestamp.encode('hex')).decode('hex')
def create_output_script(pubkey):
    # Standard pay-to-pubkey script: push 65-byte key (0x41) then
    # OP_CHECKSIG.  Python 2 only: uses the str 'hex' codec.
    script_len = '41'
    OP_CHECKSIG = 'ac'
    return (script_len + pubkey + OP_CHECKSIG).decode('hex')
def create_transaction(input_script, output_script,options):
    # Serialize the single coinbase transaction using the (legacy)
    # `construct` declarative struct API: one null-prevout input carrying
    # the timestamp script, one pay-to-pubkey output of options.value.
    transaction = Struct("transaction",
                         Bytes("version", 4),
                         Byte("num_inputs"),
                         StaticField("prev_output", 32),
                         UBInt32('prev_out_idx'),
                         Byte('input_script_len'),
                         Bytes('input_script', len(input_script)),
                         UBInt32('sequence'),
                         Byte('num_outputs'),
                         Bytes('out_value', 8),
                         Byte('output_script_len'),
                         Bytes('output_script', 0x43),
                         UBInt32('locktime'))

    # 127 = fixed-size portion of the layout above (everything except the
    # variable-length input script).
    tx = transaction.parse('\x00'*(127 + len(input_script)))
    tx.version = struct.pack('<I', 1)
    tx.num_inputs = 1
    # Coinbase: previous output is all zeroes, index 0xFFFFFFFF.
    tx.prev_output = struct.pack('<qqqq', 0,0,0,0)
    tx.prev_out_idx = 0xFFFFFFFF
    tx.input_script_len = len(input_script)
    tx.input_script = input_script
    tx.sequence = 0xFFFFFFFF
    tx.num_outputs = 1
    tx.out_value = struct.pack('<q' ,options.value)#0x000005f5e100)#012a05f200) #50 coins
    #tx.out_value = struct.pack('<q' ,0x000000012a05f200) #50 coins
    tx.output_script_len = 0x43
    tx.output_script = output_script
    tx.locktime = 0
    return transaction.build(tx)
def create_block_header(hash_merkle_root, time, bits, nonce):
    # Serialize the 80-byte block header (version, prev-hash, merkle root,
    # time, compact bits, nonce), all little-endian.
    block_header = Struct("block_header",
                          Bytes("version",4),
                          Bytes("hash_prev_block", 32),
                          Bytes("hash_merkle_root", 32),
                          Bytes("time", 4),
                          Bytes("bits", 4),
                          Bytes("nonce", 4))

    genesisblock = block_header.parse('\x00'*80)
    genesisblock.version = struct.pack('<I', 1)
    # Genesis block: there is no previous block, so the prev-hash is zero.
    genesisblock.hash_prev_block = struct.pack('<qqqq', 0,0,0,0)
    genesisblock.hash_merkle_root = hash_merkle_root
    genesisblock.time = struct.pack('<I', time)
    genesisblock.bits = struct.pack('<I', bits)
    genesisblock.nonce = struct.pack('<I', nonce)
    return block_header.build(genesisblock)
# https://en.bitcoin.it/wiki/Block_hashing_algorithm
def generate_hash(data_block, algorithm, start_nonce, bits):
    # Brute-force the header nonce (last 4 bytes of the 80-byte header)
    # until the PoW hash is numerically below the target expanded from the
    # compact `bits`.  Returns (block_hash, nonce); for the X11 family the
    # PoW hash itself is the block hash, otherwise double-SHA256 is.
    print 'Searching for genesis hash..'
    nonce = start_nonce
    last_updated = time.time()
    # https://en.bitcoin.it/wiki/Difficulty
    target = (bits & 0xffffff) * 2**(8*((bits >> 24) - 3))

    while True:
        sha256_hash, header_hash = generate_hashes_from_block(data_block, algorithm)
        last_updated = calculate_hashrate(nonce, last_updated)
        if is_genesis_hash(header_hash, target):
            if algorithm == "X11" or algorithm == "X13" or algorithm == "X15":
                return (header_hash, nonce)
            return (sha256_hash, nonce)
        else:
            nonce = nonce + 1
            # Patch the new nonce into the header's trailing 4 bytes.
            data_block = data_block[0:len(data_block) - 4] + struct.pack('<I', nonce)
def generate_hashes_from_block(data_block, algorithm):
    # Hash the header with both double-SHA256 (always computed: it is the
    # block id even for scrypt/X11-style coins) and the selected PoW
    # algorithm.  Returns (sha256_hash, header_hash), each byte-reversed
    # into the conventional big-endian display order.
    sha256_hash = hashlib.sha256(hashlib.sha256(data_block).digest()).digest()[::-1]
    header_hash = ""
    if algorithm == 'scrypt':
        header_hash = scrypt.hash(data_block,data_block,1024,1,1,32)[::-1]
    elif algorithm == 'SHA256':
        header_hash = sha256_hash
    elif algorithm == 'X11':
        # Optional PoW modules are imported lazily so they are only needed
        # when their algorithm is selected.  A plain `import` replaces the
        # original exec('import %s' % ...) anti-pattern (which also breaks
        # on Python 3, where exec cannot create function locals).
        try:
            import xcoin_hash
        except ImportError:
            sys.exit("Cannot run X11 algorithm: module xcoin_hash not found")
        header_hash = xcoin_hash.getPoWHash(data_block)[::-1]
    elif algorithm == 'X13':
        try:
            import x13_hash
        except ImportError:
            sys.exit("Cannot run X13 algorithm: module x13_hash not found")
        header_hash = x13_hash.getPoWHash(data_block)[::-1]
    elif algorithm == 'X15':
        try:
            import x15_hash
        except ImportError:
            sys.exit("Cannot run X15 algorithm: module x15_hash not found")
        header_hash = x15_hash.getPoWHash(data_block)[::-1]
    return sha256_hash, header_hash
def is_genesis_hash(header_hash, target):
    # True when the (byte-reversed) header hash, read as a big-endian
    # integer, is below the difficulty target.  binascii.hexlify (binascii
    # is already imported at module top) replaces the Python-2-only
    # str.encode('hex_codec') so this works on both Python 2 and 3.
    return int(binascii.hexlify(header_hash), 16) < target
def calculate_hashrate(nonce, last_updated):
    """Every millionth nonce, print the hash rate and the expected hours to
    exhaust the 2**32 nonce space, then return the new timestamp; otherwise
    return last_updated unchanged."""
    if nonce % 1000000 != 999999:
        return last_updated
    now = time.time()
    hashrate = round(1000000/(now - last_updated))
    generation_time = round(pow(2, 32) / hashrate / 3600, 1)
    sys.stdout.write("\r%s hash/s, estimate: %s h"%(str(hashrate), str(generation_time)))
    sys.stdout.flush()
    return now
def print_block_info(options, hash_merkle_root):
    # Echo the chosen genesis parameters; the merkle root is shown
    # byte-reversed (big-endian display order).  Python 2 print statements.
    print "algorithm: "    + (options.algorithm)
    print "merkle hash: "  + hash_merkle_root[::-1].encode('hex_codec')
    print "pszTimestamp: " + options.timestamp
    print "pubkey: "       + options.pubkey
    print "time: "         + str(options.time)
    print "bits: "         + str(hex(options.bits))
def announce_found_genesis(genesis_hash, nonce):
    # Report the successful search result.  Python 2 print statements.
    print "genesis hash found!"
    print "nonce: "        + str(nonce)
    print "genesis hash: " + genesis_hash.encode('hex_codec')
# GOGOGO!
# Runs immediately on execution (no __main__ guard).
main()
| |
import json
import tornado.ioloop, tornado.websocket
from tornado import gen
import traceback
class TradersBot:
    # Websocket client for the MangoCore exchange.  Assign the on*
    # callbacks before calling run(); each callback is invoked as
    # f(msg, TradersOrder), and any orders added to the TradersOrder are
    # serialized and sent back to MangoCore after the callback returns.

    # takes in variable number of args
    def __doNothing(self, *args):
        # Default for every callback slot: accept anything, do nothing.
        pass

    def __init__(self, host, id, password, token = None):
        '''
        Store the MangoCore connection parameters.  `token`, if given, is
        sent with the REGISTER message and echoed back in ACK REGISTER so
        you can identify this session.
        '''
        self.host = host
        self.id = id
        self.password = password
        self.token = token

        self.onAckRegister = self.__doNothing
        """MangoCore has acknowledged your registration. Callback function should be in form
        `f(msg, TradersOrder)` where msg is in the following format:

        .. code-block:: python

            {
                "message_type": "ACK REGISTER",
                "case_meta": {
                    "case_length": 300,
                    "securities": {
                        "AAPL": {
                            "tradeable": True,
                            "starting_price": 100,
                            "underlyings": {
                                "AAPL": 1
                            }
                        },
                        # and so on...one per security
                    },
                    "underlyings": {
                        "AAPL": {
                            "name": "AAPL",
                            "limit": 1000
                        },
                        # and so on...one per security
                    }
                },
                "end_time": "0001-01-01T00:00:00Z",
                "market_states": {
                    "AAPL": {
                        "ticker": "AAPL",
                        "bids": {},
                        "asks": {},
                        "last_price": 100,
                        "time": "2015-03-21T20:54:05.530846913Z"
                    },
                    # and so on...one per security
                },
                "trader_state": {
                    "cash": {"USD": 100000}
                    # ...
                    # this trader_state identical to one in onTraderUpdate
                    # see there for full details of this object
                },
                # this token will match the one passed into TradersBot constructor
                "token": "aylmao"
            }
        """

        self.onMarketUpdate = self.__doNothing
        """An update with the orderbook and last transaction price of some single ticker has arrived.
        This update will arrive roughly every half-second, as opposed to every time some event has changed
        the orderbook. But, you can still keep track of the orderbook at all times by listening to
        onTrade, which you do receive all of, and updating the orderbook accordingly. Callback function
        should be in form `f(msg, TradersOrder)` where `msg` is in the following format:

        .. code-block:: python

            {
                "message_type": "MARKET UPDATE",
                "market_state": {
                    "ticker": "AAPL",
                    "bids": {
                        "99.86": 350,
                        "99.87": 350,
                        "99.88": 300,
                        "99.89": 300,
                        "99.90": 300,
                        "99.91": 99,
                        "99.92": 170
                    },
                    "asks": {
                        "100.07": 133,
                        "100.08": 200,
                        "100.09": 250,
                        "100.10": 300,
                        "100.11": 300,
                        "100.13": 350,
                        "100.14": 350
                    },
                    "last_price": 100.07,
                    "time": "2015-03-21T21:12:17.764384883Z"
                }
            }
        """

        self.onTraderUpdate = self.__doNothing
        """A periodic update with your current trade state (positions, PNL, open orders, ...) has arrived.
        This update will also only arrive roughly every half-second, but you should be internally keeping
        track of your positions and cash anyways, so much of this information should already be known.
        Callback function should be in form `f(msg, TradersOrder)` where `msg` is in the following format:

        .. code-block:: python

            {
                "message_type": "TRADER UPDATE",
                "trader_state": {
                    "cash": {"USD": 100000},
                    "positions": {
                        "AAPL": 0,
                        "IBM": 0,
                        "IDX": 0
                    },
                    "open_orders": {},
                    "pnl": {"USD": 0},
                    "time": "2015-03-21T20:54:05.530826573Z",
                    "total_fees": 0,
                    "total_fines": 0,
                    "total_rebates": 0
                }
            }
        """

        self.onTrade = self.__doNothing
        """A trade (not necessarily involving you) has occurred. Callback function should be in form
        `f(msg, TradersOrder)` where `msg` is in the following format:

        .. code-block:: python

            {
                "message_type": "TRADE",
                "trades": [
                    {
                        "trade_id": 12,
                        "ticker": "AAPL",
                        "buy_order_id": 88,
                        "sell_order_id": 48,
                        "quantity": 50,
                        "price": 100.07,
                        # of the two orders that matched, is the more recent one a buy?
                        "buy": True,
                        "time": "2015-03-21T21:12:17.764311405Z"
                    },
                    # more trade {...} of the same type
                ]
            }

        order_id and trade_id are separate, globally incrementing integers (i.e. the first submitted
        order has id 0; the next has id 1, and so on; the first trade has id 0, the next has id 1, and
        so on).
        """

        self.onAckModifyOrders = self.__doNothing
        """MangoCore has acknowledged your sell/buy/cancel order. Callback function should be in form
        `f(msg, TradersOrder)` where `msg` is in the following format:

        .. code-block:: python

            {
                "message_type": "ACK MODIFY ORDERS",
                "cancels": {
                    "324": true, "1915": false
                },
                "orders": [
                    {
                        "order_id": "3748",
                        "ticker": "AAPL",
                        "buy": True,
                        "quantity": 100,
                        "price": 99.74,
                        "token": "sqv6ajor"
                    },
                    # more order {...} of the same type
                ],
                "token": "ze12a9k9"
            }

        A cancel with a true value indicates a successful cancellation.
        """

        self.onNews = self.__doNothing
        """A news event has arrived. Callback function should be in form
        `f(msg, TradersOrder)` where msg is in the following format:

        .. code-block:: python

            {
                "message_type": "NEWS",
                "news": {
                    "headline": "Apple releases new Macbook",
                    "source": "Ars Technica",
                    "body": "Today, Apple releases a new shiny laptop.",
                    "time": 235,
                    "price": 0
                }
            }

        Where time is the number of ticks since the round started, and price is the amount you must pay
        per news item you receive from this source (currently irrelevant).
        """

        self.onAckSubscribe = self.__doNothing
        """
        .. note::
            The 2016 competition won't require subscribing to news, so this callback won't be used.

        MangoCore has acknowledged your subscription to a news source.
        """

        self.onTenderOffer = self.__doNothing
        """
        .. note::
            The 2016 competition won't involve tender offers, so this callback won't be used.

        There's a tender offer you can accept.
        """

        self.onAckTenderOffer = self.__doNothing
        """
        .. note::
            The 2016 competition won't involve tender offers, so this callback won't be used.

        MangoCore has acknowledged your tender offer order.
        """

        self.onPing = self.__doNothing
        """
        .. note::
            MangoCore sends ping messages just to keep WebSocket connections alive. This probably
            isn't something that's useful to listen for.

        MangoCore sent the client a ping message. Callback function should be in the form
        `f(msg, TradersOrder)` where `msg` is the following:

        .. code-block:: python

            { "message_type": "PING" }
        """

        # (func, periodMs) pairs registered via addPeriodicCallback.
        self.__periodics = []

    # Reads input from from the server and processes
    # them accordingly
    def __handle_read(self, msg):
        if msg is None:
            # None means the websocket closed; stop the IO loop so run() returns.
            print("WebSocket connection has closed")
            tornado.ioloop.IOLoop.instance().stop()
            return
        msg = json.loads(msg)
        func = self.fmap.get(msg['message_type'])
        if func is not None:
            order = TradersOrder()
            try:
                func(msg, order)
                # Serialize any leftover orders, then send every queued
                # MODIFY ORDERS message produced during the callback.
                order.toJson()
                for j in order.jsons:
                    self.__write(j)
            except Exception as e:
                # A failing user callback must not kill the read loop.
                traceback.print_exc()

    def __write(self, msg):
        self.ws.write_message(msg)

    @gen.coroutine
    def __connect(self, subAllTrade):
        # Open the websocket and register; MangoCore listens on port 10914.
        self.ws = yield tornado.websocket.websocket_connect('ws://%s:10914/%s/%s' %
            (self.host, self.id, self.password), on_message_callback = self.__handle_read)
        if self.token is not None:
            self.__write(json.dumps({'message_type' : 'REGISTER', 'token' : self.token, 'sub_all_trades': subAllTrade}))
        else:
            self.__write(json.dumps({'message_type' : 'REGISTER', 'sub_all_trades': subAllTrade}))

    def run(self):
        '''
        Starts the TradersBot. After this point, you can't add or modify any callbacks of
        the TradersBot.
        '''
        subAllTrade = False
        # if not subscribed to onTrade, only receive your own onTrade
        #if self.onTrade != self.__doNothing:
        #    subAllTrade = True
        #    print("Subscribing to all trades", subAllTrade)

        # Dispatch table: server message_type -> user callback.
        self.fmap = {
            'ACK REGISTER'      : self.onAckRegister,
            'PING'              : self.onPing,
            'MARKET UPDATE'     : self.onMarketUpdate,
            'TRADER UPDATE'     : self.onTraderUpdate,
            'TRADE'             : self.onTrade,
            'ACK MODIFY ORDERS' : self.onAckModifyOrders,
            'NEWS'              : self.onNews,
            'ACK SUBSCRIBE'     : self.onAckSubscribe,
            'TENDER OFFER'      : self.onTenderOffer,
            'ACK TENDER OFFER'  : self.onAckTenderOffer
        }
        self.__connect(subAllTrade)
        # Dummy periodic keeps the IO loop responsive even with no traffic.
        tornado.ioloop.PeriodicCallback(lambda : None, 1000).start()
        for p in self.__periodics:
            tornado.ioloop.PeriodicCallback(p[0], p[1]).start()
        tornado.ioloop.IOLoop.instance().start()

    def addPeriodicCallback(self, func, periodMs):
        '''
        .. warning::
            This function only exists because it's used in MangoCore stress tests to make sure
            MangoCore remains performant under periods of high frequency trade. We strongly advise
            against using this function in your trading code. Trades should happen in reaction to
            market events, which is what callbacks are for.

        Every `periodMs` milliseconds, TradersBot will call `func` with a blank TradersOrder.
        `func` should take in one parameter, a `TradersOrder`, that allows `func` to place orders.
        '''
        def f():
            order = TradersOrder()
            func(order)
            order.toJson()
            for j in order.jsons:
                self.__write(j)
        self.__periodics.append((f, periodMs))
class TradersOrder:
    """Accumulates buy/sell/cancel requests and serializes them into
    MODIFY ORDERS JSON messages (queued on `self.jsons`) for MangoCore."""

    def __init__(self):
        self.orders = []
        self.cancels = []
        self.jsons = []

    def addBuy(self, ticker, quantity, price = None, token = None):
        '''
        Add a buy order for `ticker`, of size `quantity` shares. If no price is passed in,
        the buy is taken as a market order. If `quantity` is negative, it is interpreted as a
        sell order of the positive amount. See :ref:`tokens` for the token parameter.
        '''
        self.addTrade(ticker, True, quantity, price, token)

    def addSell(self, ticker, quantity, price = None, token = None):
        '''
        Add a sell order for `ticker`, of size `quantity` shares. If no price is passed in,
        the sell is taken as a market order. If `quantity` is negative, it is interpreted as a
        buy order of the positive amount. See :ref:`tokens` for the token parameter.
        '''
        self.addTrade(ticker, False, quantity, price, token)

    def addTrade(self, ticker, isBuy, quantity, price = None, token = None):
        '''
        Submit an order for `ticker`, of size `quantity` shares. The order is a buy if `isBuy`
        is True and a sell otherwise. The order is a market order if no price is passed in.
        If `quantity` is negative, it is interpreted as an order of the opposite type, of the
        positive amount. See :ref:`tokens` for the token parameter.
        '''
        if quantity == 0:
            return
        if quantity < 0:
            # Negative size flips the side.
            quantity, isBuy = -quantity, not isBuy
        entry = {"ticker":ticker, "buy":isBuy, "quantity":quantity}
        if price is not None:
            entry["price"] = price
        if token is not None:
            entry["token"] = token
        self.orders.append(entry)

    def addCancel(self, ticker, orderId):
        '''
        Cancel the order for `ticker` with the given `orderId`. The `orderId` is returned
        on `onAckModifyOrders`.
        '''
        self.cancels.append({"ticker":ticker, "order_id":orderId})

    def toJson(self, token = None):
        '''
        Serialize the pending orders/cancels into one MODIFY ORDERS JSON
        message appended to `self.jsons`, then reset the pending lists.
        Nothing is queued when there are no pending orders or cancels.

        TradersBot calls this automatically (with ``token=None``) after each
        callback, so calling it yourself is only useful to (1) tag a batch
        with a `token` you can match in `onAckModifyOrders`, or (2) split
        your orders across several messages -- note splitting makes you hit
        the rate limit faster and offers no obvious benefit:

        .. code-block:: python

            # When callback1 is called, MangoCore will receive 1 message
            # 1: {"message_type":"MODIFY ORDERS", "orders":[{"ticker":"AAPL","buy":true,"quantity":20},{"ticker":"GOOG","buy":false,"quantity":50}]}
            def callback1(msg, order):
                order.addBuy('AAPL', 20)
                order.addSell('GOOG', 50)

            # When callback2 is called, MangoCore will receive 2 messages
            # 1: {"message_type":"MODIFY ORDERS", "orders":[{"ticker":"AAPL","buy":true,"quantity":20}]}
            # 2: {"message_type":"MODIFY ORDERS", "orders":[{"ticker":"GOOG","buy":false,"quantity":50}], "token":"TOKEN123"}
            def callback2(msg, order):
                order.addBuy('AAPL', 20)
                order.toJson()
                order.addSell('GOOG', 50)
                order.toJson("TOKEN123")
        '''
        payload = {"message_type":"MODIFY ORDERS"}
        if self.orders:
            payload["orders"] = self.orders
        if self.cancels:
            payload["cancels"] = self.cancels
        if len(payload) > 1:
            # Something beyond the message_type key: worth sending.
            if token is not None:
                payload["token"] = token
            self.jsons.append(json.dumps(payload))
        self.orders = []
        self.cancels = []
| |
# mako/cache.py
# Copyright (C) 2006-2014 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from mako import compat, util
# Plugin registry for cache backends.  The "beaker" backend ships with Mako
# and is resolved lazily from mako.ext.beaker_cache on first use.
_cache_plugins = util.PluginLoader("mako.cache")

register_plugin = _cache_plugins.register
register_plugin("beaker", "mako.ext.beaker_cache", "BeakerCacheImpl")
class Cache(object):
    """Represents a data content cache made available to the module
    space of a specific :class:`.Template` object.

    .. versionadded:: 0.6
       :class:`.Cache` by itself is mostly a
       container for a :class:`.CacheImpl` object, which implements
       a fixed API to provide caching services; specific subclasses exist to
       implement different
       caching strategies.   Mako includes a backend that works with
       the Beaker caching system.   Beaker itself then supports
       a number of backends (i.e. file, memory, memcached, etc.)

    The construction of a :class:`.Cache` is part of the mechanics
    of a :class:`.Template`, and programmatic access to this
    cache is typically via the :attr:`.Template.cache` attribute.

    """

    impl = None
    """Provide the :class:`.CacheImpl` in use by this :class:`.Cache`.

    This accessor allows a :class:`.CacheImpl` with additional
    methods beyond that of :class:`.Cache` to be used programmatically.
    """

    id = None
    """Return the 'id' that identifies this cache.

    This is a value that should be globally unique to the
    :class:`.Template` associated with this cache, and can
    be used by a caching system to name a local container
    for data specific to this template.
    """

    starttime = None
    """Epochal time value for when the owning :class:`.Template` was
    first compiled.

    A cache implementation may wish to invalidate data earlier than
    this timestamp; this has the effect of the cache for a specific
    :class:`.Template` starting clean any time the :class:`.Template`
    is recompiled, such as when the original template file changed on
    the filesystem.
    """

    def __init__(self, template, *args):
        # check for a stale template calling the
        # constructor
        if isinstance(template, compat.string_types) and args:
            return
        self.template = template
        self.id = template.module.__name__
        self.starttime = template.module._modified_time
        # Per-<%def> cache configuration, memoized by _get_cache_kw.
        self._def_regions = {}
        self.impl = self._load_impl(self.template.cache_impl)

    def _load_impl(self, name):
        # Instantiate the named backend from the plugin registry.
        return _cache_plugins.load(name)(self)

    def get_or_create(self, key, creation_function, **kw):
        """Retrieve a value from the cache, using the given creation function
        to generate a new value."""

        return self._ctx_get_or_create(key, creation_function, None, **kw)

    def _ctx_get_or_create(self, key, creation_function, context, **kw):
        """Retrieve a value from the cache, using the given creation function
        to generate a new value."""

        # With caching disabled, always regenerate and bypass the backend.
        if not self.template.cache_enabled:
            return creation_function()

        return self.impl.get_or_create(key,
                                       creation_function,
                                       **self._get_cache_kw(kw, context))

    def set(self, key, value, **kw):
        """Place a value in the cache.

        :param key: the value's key.
        :param value: the value.
        :param \**kw: cache configuration arguments.

        """

        self.impl.set(key, value, **self._get_cache_kw(kw, None))

    put = set
    """A synonym for :meth:`.Cache.set`.

    This is here for backwards compatibility.

    """

    def get(self, key, **kw):
        """Retrieve a value from the cache.

        :param key: the value's key.
        :param \**kw: cache configuration arguments.  The
         backend is configured using these arguments upon first request.
         Subsequent requests that use the same series of configuration
         values will use that same backend.

        """
        return self.impl.get(key, **self._get_cache_kw(kw, None))

    def invalidate(self, key, **kw):
        """Invalidate a value in the cache.

        :param key: the value's key.
        :param \**kw: cache configuration arguments.  The
         backend is configured using these arguments upon first request.
         Subsequent requests that use the same series of configuration
         values will use that same backend.

        """
        self.impl.invalidate(key, **self._get_cache_kw(kw, None))

    def invalidate_body(self):
        """Invalidate the cached content of the "body" method for this
        template.

        """
        self.invalidate('render_body', __M_defname='render_body')

    def invalidate_def(self, name):
        """Invalidate the cached content of a particular ``<%def>`` within this
        template.

        """

        self.invalidate('render_%s' % name, __M_defname='render_%s' % name)

    def invalidate_closure(self, name):
        """Invalidate a nested ``<%def>`` within this template.

        Caching of nested defs is a blunt tool as there is no
        management of scope -- nested defs that use cache tags
        need to have names unique of all other nested defs in the
        template, else their content will be overwritten by
        each other.

        """

        self.invalidate(name, __M_defname=name)

    def _get_cache_kw(self, kw, context):
        # Merge template-wide cache_args with call-time kw; results for a
        # named <%def> are memoized in _def_regions so its configuration is
        # computed once.
        defname = kw.pop('__M_defname', None)
        if not defname:
            tmpl_kw = self.template.cache_args.copy()
            tmpl_kw.update(kw)
        elif defname in self._def_regions:
            tmpl_kw = self._def_regions[defname]
        else:
            tmpl_kw = self.template.cache_args.copy()
            tmpl_kw.update(kw)
            self._def_regions[defname] = tmpl_kw
        if context and self.impl.pass_context:
            # Copy before mutating: the memoized dict must stay context-free.
            tmpl_kw = tmpl_kw.copy()
            tmpl_kw.setdefault('context', context)
        return tmpl_kw
class CacheImpl(object):
    """Provide a cache implementation for use by :class:`.Cache`."""

    def __init__(self, cache):
        # The owning Cache; gives access to id/starttime/template.
        self.cache = cache

    pass_context = False
    """If ``True``, the :class:`.Context` will be passed to
    :meth:`get_or_create <.CacheImpl.get_or_create>` as the name ``'context'``.
    """

    def get_or_create(self, key, creation_function, **kw):
        """Retrieve a value from the cache, using the given creation function
        to generate a new value.

        This function *must* return a value, either from
        the cache, or via the given creation function.
        If the creation function is called, the newly
        created value should be populated into the cache
        under the given key before being returned.

        :param key: the value's key.
        :param creation_function: function that when called generates
         a new value.
        :param \**kw: cache configuration arguments.

        """
        raise NotImplementedError()

    def set(self, key, value, **kw):
        """Place a value in the cache.

        :param key: the value's key.
        :param value: the value.
        :param \**kw: cache configuration arguments.

        """
        raise NotImplementedError()

    def get(self, key, **kw):
        """Retrieve a value from the cache.

        :param key: the value's key.
        :param \**kw: cache configuration arguments.

        """
        raise NotImplementedError()

    def invalidate(self, key, **kw):
        """Invalidate a value in the cache.

        :param key: the value's key.
        :param \**kw: cache configuration arguments.

        """
        raise NotImplementedError()
| |
# -*- coding: utf-8 -*-
from Instanssi.common.http import Http403
from Instanssi.common.auth import staff_access_required
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from Instanssi.kompomaatti.models import *
from Instanssi.arkisto.models import OtherVideo, OtherVideoCategory
from Instanssi.admin_arkisto.forms import VideoForm, VideoCategoryForm
from Instanssi.admin_arkisto.misc import utils
from Instanssi.admin_base.misc.custom_render import admin_render
# Logging related
import logging
logger = logging.getLogger(__name__)
@staff_access_required
def index(request, sel_event_id):
    # Landing page of the archive admin for the selected event.
    context = {'selected_event_id': int(sel_event_id)}
    return admin_render(request, "admin_arkisto/index.html", context)
@staff_access_required
def removeoldvotes(request, sel_event_id):
    """Delete raw vote rows of a finished event.

    Refused while the event is ongoing or while vote results have not yet
    been consolidated onto the entry rows (prevents data loss).
    """
    # BUG FIX: has_perm takes a single "<app>.<codename>" string; has_perms
    # expects an iterable and would iterate a bare string char-by-char,
    # effectively always denying access.
    if not request.user.has_perm('kompomaatti.delete_vote'):
        raise Http403
    # Don't proceed if the event is still ongoing
    event = get_object_or_404(Event, pk=int(sel_event_id))
    if utils.is_event_ongoing(event):
        raise Http404
    # Find compos belonging to this event
    compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
    # Don't allow removing votes if they haven't been consolidated to entries
    if utils.is_votes_unoptimized(compo_ids):
        raise Http404
    # Delete votes belonging to compos in this event
    for group in VoteGroup.objects.filter(compo__in=compo_ids):
        group.delete_votes()
        group.delete()
    logger.info('Event old votes removed.', extra={'user': request.user, 'event': event})
    return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def transferrights(request, sel_event_id):
    """Hand ownership of all event entries and participations to the
    archive user ("arkisto").  Refused while the event is still ongoing.
    """
    # BUG FIX: has_perm takes a single permission string; has_perms expects
    # an iterable of permission names and would always deny here.
    if not request.user.has_perm('kompomaatti.change_entry'):
        raise Http403
    # Don't allow this function if the event is still ongoing
    event = get_object_or_404(Event, pk=int(sel_event_id))
    if utils.is_event_ongoing(event):
        raise Http404
    # Archive user plus the compo/competition ids belonging to this event
    archiveuser = get_object_or_404(User, username="arkisto")
    compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
    competition_ids = Competition.objects.filter(event_id=int(sel_event_id)).values('pk')
    # Transfer all user rights on entries and competition participations
    Entry.objects.filter(compo__in=compo_ids).update(user=archiveuser)
    CompetitionParticipation.objects.filter(competition__in=competition_ids).update(user=archiveuser)
    logger.info('Event rights transferred.', extra={'user': request.user, 'event': event})
    return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def optimizescores(request, sel_event_id):
    """Persist computed rank and score onto each entry of a finished event
    so they need not be recalculated on every archive page view.
    """
    # BUG FIX: has_perm takes a single permission string; has_perms expects
    # an iterable of permission names and would always deny here.
    if not request.user.has_perm('kompomaatti.change_entry'):
        raise Http403
    # Don't allow this function if the event is still ongoing
    event = get_object_or_404(Event, pk=int(sel_event_id))
    if utils.is_event_ongoing(event):
        raise Http404
    compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
    # Store rank/score on the rows instead of recomputing them per request
    for entry in Entry.objects.filter(compo__in=compo_ids):
        entry.archive_rank = entry.get_rank()
        entry.archive_score = entry.get_score()
        entry.save()
    logger.info('Event scores optimized.', extra={'user': request.user, 'event': event})
    return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def archiver(request, sel_event_id):
    """Render the archiving checklist/status page for the selected event."""
    event = get_object_or_404(Event, pk=sel_event_id)
    # Archive user that should end up owning all archived content
    archiveuser = get_object_or_404(User, username="arkisto")
    # Compo ids belonging to this event, reused by several checks below
    compo_ids = Compo.objects.filter(event_id=int(sel_event_id)).values('pk')
    # Are any entries or participations still owned by someone other than
    # the archive user?  exists() lets the database answer without loading
    # every row into Python (the previous per-row loops did).
    untransferred = Entry.objects.filter(compo__in=compo_ids)\
        .exclude(user=archiveuser).exists()
    if not untransferred:
        competition_ids = Competition.objects.filter(event_id=int(sel_event_id)).values('pk')
        untransferred = CompetitionParticipation.objects\
            .filter(competition__in=competition_ids)\
            .exclude(user=archiveuser).exists()
    # Check if voting results still need to be optimized
    votes_unoptimized = utils.is_votes_unoptimized(compo_ids)
    # Check if event is still ongoing
    ongoing_activity = utils.is_event_ongoing(event)
    # See if there are any old raw votes left
    old_votes_found = Vote.objects.filter(compo__in=compo_ids).exists()
    return admin_render(request, "admin_arkisto/archiver.html", {
        'selected_event_id': int(sel_event_id),
        'is_archived': event.archived,
        'untransferred': untransferred,
        'ongoing_activity': ongoing_activity,
        'votes_unoptimized': votes_unoptimized,
        'old_votes_found': old_votes_found,
    })
@staff_access_required
def show(request, sel_event_id):
    """Mark the event as visible (archived) in the public archive."""
    # BUG FIX: has_perm takes a single permission string; has_perms expects
    # an iterable of permission names and would always deny here.
    if not request.user.has_perm('kompomaatti.change_event'):
        raise Http403
    event = get_object_or_404(Event, pk=sel_event_id)
    event.archived = True
    event.save()
    logger.info('Event set as visible in archive.', extra={'user': request.user, 'event': event})
    return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def hide(request, sel_event_id):
    """Mark the event as hidden (not archived) in the public archive."""
    # BUG FIX: has_perm takes a single permission string; has_perms expects
    # an iterable of permission names and would always deny here.
    if not request.user.has_perm('kompomaatti.change_event'):
        raise Http403
    event = get_object_or_404(Event, pk=sel_event_id)
    event.archived = False
    event.save()
    logger.info('Event set as hidden in archive.', extra={'user': request.user, 'event': event})
    return HttpResponseRedirect(reverse('manage-arkisto:archiver', args=(sel_event_id,)))
@staff_access_required
def vids(request, sel_event_id):
    # List and add "other" archive videos for the selected event.
    event = get_object_or_404(Event, pk=sel_event_id)
    if request.method == "POST":
        # Adding a video requires an explicit permission
        if not request.user.has_perm('arkisto.add_othervideo'):
            raise Http403
        vidform = VideoForm(request.POST, event=event)
        if vidform.is_valid():
            video = vidform.save()
            logger.info('Added archive video {}'.format(video.name),
                        extra={'user': request.user, 'event': event})
            return HttpResponseRedirect(reverse('manage-arkisto:vids', args=(sel_event_id,)))
    else:
        vidform = VideoForm(event=event)
    # Collect the videos from every category of the selected event
    categories = OtherVideoCategory.objects.filter(event_id=int(sel_event_id))
    videos = [video
              for cat in categories
              for video in OtherVideo.objects.filter(category=cat)]
    return admin_render(request, "admin_arkisto/vids.html", {
        'videos': videos,
        'vidform': vidform,
        'selected_event_id': int(sel_event_id),
    })
@staff_access_required
def editvid(request, sel_event_id, video_id):
    # Edit a single archive video of the selected event.
    if not request.user.has_perm('arkisto.change_othervideo'):
        raise Http403
    video = get_object_or_404(OtherVideo, pk=video_id)
    event = get_object_or_404(Event, pk=sel_event_id)
    if request.method == "POST":
        vidform = VideoForm(request.POST, instance=video, event=event)
        if vidform.is_valid():
            saved = vidform.save()
            logger.info('Edited archive video {}'.format(saved.name),
                        extra={'user': request.user, 'event': event})
            return HttpResponseRedirect(reverse('manage-arkisto:vids', args=(sel_event_id,)))
    else:
        vidform = VideoForm(instance=video, event=event)
    return admin_render(request, "admin_arkisto/editvid.html", {
        'vidform': vidform,
        'vid': video,
        'selected_event_id': int(sel_event_id),
    })
@staff_access_required
def deletevid(request, sel_event_id, video_id):
    # Remove a single archive video; an unknown id is silently ignored.
    if not request.user.has_perm('arkisto.delete_othervideo'):
        raise Http403
    event = get_object_or_404(Event, pk=sel_event_id)
    try:
        video = OtherVideo.objects.get(id=video_id)
    except OtherVideo.DoesNotExist:
        pass
    else:
        video.delete()
        logger.info('Deleted archive video {}'.format(video.name),
                    extra={'user': request.user, 'event': event})
    return HttpResponseRedirect(reverse('manage-arkisto:vids', args=(sel_event_id,)))
@staff_access_required
def cats(request, sel_event_id):
    """List video categories of the event and handle the add-category form."""
    event = get_object_or_404(Event, pk=sel_event_id)
    if request.method == "POST":
        # Adding a category requires an explicit permission
        if not request.user.has_perm('arkisto.add_othervideocategory'):
            raise Http403
        catform = VideoCategoryForm(request.POST)
        if catform.is_valid():
            cat = catform.save(commit=False)
            cat.event = event
            cat.save()
            # BUG FIX: the log format string was missing its {} placeholder,
            # so the category name never appeared in the message.
            logger.info('Added archive video category {}'.format(cat.name),
                        extra={'user': request.user, 'event': event})
            return HttpResponseRedirect(reverse('manage-arkisto:vidcats', args=(sel_event_id,)))
    else:
        catform = VideoCategoryForm()
    # Categories belonging to the selected event
    categories = OtherVideoCategory.objects.filter(event_id=int(sel_event_id))
    return admin_render(request, "admin_arkisto/cats.html", {
        'categories': categories,
        'catform': catform,
        'selected_event_id': int(sel_event_id),
    })
@staff_access_required
def editcat(request, sel_event_id, category_id):
    # Edit an existing video category of the selected event.
    if not request.user.has_perm('arkisto.change_othervideocategory'):
        raise Http403
    event = get_object_or_404(Event, pk=sel_event_id)
    category = get_object_or_404(OtherVideoCategory, pk=category_id, event=event)
    if request.method == "POST":
        catform = VideoCategoryForm(request.POST, instance=category)
        if catform.is_valid():
            saved = catform.save()
            logger.info('Edited archive video category {}'.format(saved.name),
                        extra={'user': request.user, 'event': event})
            return HttpResponseRedirect(reverse('manage-arkisto:vidcats', args=(sel_event_id,)))
    else:
        catform = VideoCategoryForm(instance=category)
    return admin_render(request, "admin_arkisto/editcat.html", {
        'catform': catform,
        'cat': category,
        'selected_event_id': int(sel_event_id),
    })
@staff_access_required
def deletecat(request, sel_event_id, category_id):
    # Remove a video category; an unknown id is silently ignored.
    if not request.user.has_perm('arkisto.delete_othervideocategory'):
        raise Http403
    event = get_object_or_404(Event, pk=sel_event_id)
    try:
        cat = OtherVideoCategory.objects.get(id=category_id, event=event)
    except OtherVideoCategory.DoesNotExist:
        pass
    else:
        cat.delete()
        logger.info('Deleted archive video category {}'.format(cat.name),
                    extra={'user': request.user, 'event': event})
    return HttpResponseRedirect(reverse('manage-arkisto:vidcats', args=(sel_event_id,)))
| |
"""KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import errno
import logging
import os
import sys
from gzip import GzipFile
from io import BytesIO
from os.path import exists, join
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
from .base import get_data_home
from .base import Bunch
from ..externals import joblib, six
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
# 10%-sample and full-size KDD Cup '99 archives at the UCI repository.
URL10 = ('http://archive.ics.uci.edu/ml/'
         'machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz')
URL = ('http://archive.ics.uci.edu/ml/'
       'machine-learning-databases/kddcup99-mld/kddcup.data.gz')
# NOTE(review): this grabs the root logger; module loggers conventionally
# use logging.getLogger(__name__) — confirm before changing.
logger = logging.getLogger()
def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
                   percent10=True, download_if_missing=True):
    """Load and return the kddcup 99 dataset (classification).

    The KDD Cup '99 dataset was created by processing the tcpdump portions
    of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
    created by MIT Lincoln Lab [1]. The artificial data was generated using
    a closed network and hand-injected attacks to produce a large number of
    different types of attack with normal activity in the background.

    As the initial goal was to produce a large training set for supervised
    learning algorithms, there is a large proportion (80.1%) of abnormal
    data which is unrealistic in real world, and inappropriate for
    unsupervised anomaly detection which aims at detecting 'abnormal' data,
    ie 1) qualitatively different from normal data 2) in large minority
    among the observations.

    We thus transform the KDD Data set into two different data sets: SA and SF.

    - SA is obtained by simply selecting all the normal data, and a small
      proportion of abnormal data to give an anomaly proportion of 1%.
    - SF is obtained as in [2] by simply picking up the data whose attribute
      logged_in is positive, thus focusing on the intrusion attack, which
      gives a proportion of 0.3% of attack.
    - http and smtp are two subsets of SF corresponding with third feature
      equal to 'http' (resp. to 'smtp')

    General KDD structure:   4898431 samples, 41 features
    SA structure:            976158 samples, 41 features
    SF structure:            699691 samples, 4 features
    http structure:          619052 samples, 3 features
    smtp structure:          95373 samples, 3 features
    Targets are str, 'normal.' or the name of the anomaly type.

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset and for sub-sampling the
        abnormal samples of the 'SA' subset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.
        (The docstring previously claimed default=False, contradicting the
        signature.)

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn and 'target', the regression target for each
        sample.

    References
    ----------
    .. [1] Analysis and Results of the 1999 DARPA Off-Line Intrusion
           Detection Evaluation Richard Lippmann, Joshua W. Haines,
           David J. Fried, Jonathan Korba, Kumar Das

    .. [2] A Geometric Framework for Unsupervised Anomaly Detection:
           Detecting Intrusions in Unlabeled Data (2002) by Eleazar Eskin,
           Andrew Arnold, Michael Prerau, Leonid Portnoy, Sal Stolfo
    """
    # BUG FIX: random_state was not forwarded before, so shuffle=True could
    # never be made reproducible.
    kddcup99 = _fetch_brute_kddcup99(shuffle=shuffle, percent10=percent10,
                                     random_state=random_state,
                                     download_if_missing=download_if_missing)
    data = kddcup99.data
    target = kddcup99.target
    if subset == 'SA':
        s = target == b'normal.'
        t = np.logical_not(s)
        normal_samples = data[s, :]
        normal_targets = target[s]
        abnormal_samples = data[t, :]
        abnormal_targets = target[t]
        n_samples_abnormal = abnormal_samples.shape[0]
        # sub-sample 3377 abnormal rows to reach ~1% anomaly proportion
        random_state = check_random_state(random_state)
        r = random_state.randint(0, n_samples_abnormal, 3377)
        abnormal_samples = abnormal_samples[r]
        abnormal_targets = abnormal_targets[r]
        data = np.r_[normal_samples, abnormal_samples]
        target = np.r_[normal_targets, abnormal_targets]
    if subset == 'SF' or subset == 'http' or subset == 'smtp':
        # select all samples with positive logged_in attribute:
        s = data[:, 11] == 1
        data = np.c_[data[s, :11], data[s, 12:]]
        target = target[s]
        # log-transform the heavily skewed duration/byte-count columns
        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float))
        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float))
        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float))
        if subset == 'http':
            s = data[:, 2] == b'http'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
        if subset == 'smtp':
            s = data[:, 2] == b'smtp'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
        if subset == 'SF':
            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
    return Bunch(data=data, target=target)
def _fetch_brute_kddcup99(subset=None, data_home=None,
                          download_if_missing=True, random_state=None,
                          shuffle=False, percent10=False):
    """Load the kddcup99 dataset, downloading it if necessary.

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        Unused here; kept for interface compatibility with callers.

    data_home : string, optional
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : boolean, default=True
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    percent10 : bool, default=False
        Whether to load only 10 percent of the data.

    Returns
    -------
    dataset : Bunch with ``data`` (shape (494021, 41)), ``target``
        (shape (494021,), attack type or 'normal.') and ``DESCR``.

    Raises
    ------
    IOError
        If the data is not cached locally and ``download_if_missing`` is
        False.  (Previously this path fell through and raised a confusing
        NameError via an undefined ``X``.)
    """
    data_home = get_data_home(data_home=data_home)
    if sys.version_info[0] == 3:
        # joblib's zlib pickles are not portable between Python 2 and 3,
        # so cache under a distinct folder on Python 3.
        dir_suffix = "-py3"
    else:
        # Backward compat for Python 2 users
        dir_suffix = ""
    if percent10:
        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
    else:
        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
    samples_path = join(kddcup_dir, "samples")
    targets_path = join(kddcup_dir, "targets")
    available = exists(samples_path)
    if available:
        X = joblib.load(samples_path)
        y = joblib.load(targets_path)
    elif download_if_missing:
        _mkdirp(kddcup_dir)
        URL_ = URL10 if percent10 else URL
        logger.warning("Downloading %s" % URL_)
        f = BytesIO(urlopen(URL_).read())
        # Column dtypes of the raw CSV (41 features + label)
        dt = [('duration', int),
              ('protocol_type', 'S4'),
              ('service', 'S11'),
              ('flag', 'S6'),
              ('src_bytes', int),
              ('dst_bytes', int),
              ('land', int),
              ('wrong_fragment', int),
              ('urgent', int),
              ('hot', int),
              ('num_failed_logins', int),
              ('logged_in', int),
              ('num_compromised', int),
              ('root_shell', int),
              ('su_attempted', int),
              ('num_root', int),
              ('num_file_creations', int),
              ('num_shells', int),
              ('num_access_files', int),
              ('num_outbound_cmds', int),
              ('is_host_login', int),
              ('is_guest_login', int),
              ('count', int),
              ('srv_count', int),
              ('serror_rate', float),
              ('srv_serror_rate', float),
              ('rerror_rate', float),
              ('srv_rerror_rate', float),
              ('same_srv_rate', float),
              ('diff_srv_rate', float),
              ('srv_diff_host_rate', float),
              ('dst_host_count', int),
              ('dst_host_srv_count', int),
              ('dst_host_same_srv_rate', float),
              ('dst_host_diff_srv_rate', float),
              ('dst_host_same_src_port_rate', float),
              ('dst_host_srv_diff_host_rate', float),
              ('dst_host_serror_rate', float),
              ('dst_host_srv_serror_rate', float),
              ('dst_host_rerror_rate', float),
              ('dst_host_srv_rerror_rate', float),
              ('labels', 'S16')]
        DT = np.dtype(dt)
        file_ = GzipFile(fileobj=f, mode='r')
        Xy = []
        for line in file_.readlines():
            if six.PY3:
                line = line.decode()
            Xy.append(line.replace('\n', '').split(','))
        file_.close()
        # was a bare print(); use the module logger instead
        logger.debug('extraction done')
        Xy = np.asarray(Xy, dtype=object)
        for j in range(42):
            Xy[:, j] = Xy[:, j].astype(DT[j])
        X = Xy[:, :-1]
        y = Xy[:, -1]
        # XXX bug when compress!=0:
        # (error: 'Incorrect data length while decompressing[...] the file
        # could be corrupted.')
        joblib.dump(X, samples_path, compress=0)
        joblib.dump(y, targets_path, compress=0)
    else:
        raise IOError("Data not found and `download_if_missing` is False")
    if shuffle:
        X, y = shuffle_method(X, y, random_state=random_state)
    return Bunch(data=X, target=y, DESCR=__doc__)
def _mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| |
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
from .. import __author__, __version__
from ..core import np as np
import operator as op
class Funlib(object):
    """ Funlib
    This class must be loaded by xl2py.core.processor as Funlib to parse
    simple lambda-conversion references or customized functions (e.g. pyxl_error)
    """
    def __init__(self):
        self.__author__ = __author__
        self.__version__ = __version__
        # Mapping from Excel function/operator tokens to numpy-backed
        # callables; every callable takes one list of evaluated arguments.
        self.fun_database = {
            'IF' : lambda args : [args[0]*args[1]+(abs(args[0]-1)*args[2])][0],
            'AVERAGE' : lambda args : np.average(args[0]),
            'STDEV.P' : lambda args : np.std(args[0]),
            'TRANSPOSE' : lambda args : np.transpose(args[0]),
            'ABS' : lambda args : np.abs(args[0]),
            'MMULT' : lambda args : np.dot(*args),
            'IFERROR' : lambda args : self.pyxl_error(*args),
            'SUM' : lambda args : np.sum(args[0]),
            'COUNT' : lambda args : np.size(args[0]),
            'SQRT' : lambda args : np.sqrt(args[0]),
            '^' : lambda args : np.power(*args),
            '<' : lambda args : np.float64(op.lt(*args)),
            '>' : lambda args : np.float64(op.gt(*args)),
            '<=' : lambda args : np.float64(op.le(*args)),
            '>=' : lambda args : np.float64(op.ge(*args)),
            '<>' : lambda args : np.float64(op.ne(*args)),
            '=' : lambda args : np.float64(op.eq(*args)),
            '+' : lambda args : np.add(*args),
            '-' : lambda args : np.subtract(*args),
            '/' : lambda args : np.divide(*args),
            '*' : lambda args : np.multiply(*args)
            }

    def pyxl_error(self, x, y):
        """ pyxl_error (substitute for XL fun IFERROR)
        params (2):
            x as numeric or numeric array (modified in place when an array)
            y as 2-D numeric array; y[0][0] is the replacement value
        returns x with nan and inf values converted to y[0][0]
        """
        # BUG FIX: the builtin any() raises "truth value of an array with
        # more than one element is ambiguous" on 2-D masks; np.any reduces
        # over all elements regardless of shape.
        bad = np.logical_or(np.isnan(x), np.isinf(x))
        if np.any(bad):
            x[bad] = y[0][0]
        return x
# Module-level singleton shared by the calc-object classes below.
Funlib_obj = Funlib()
class NumObj(object):
    """Wraps a scalar constant; calling it yields a 2-D numpy value."""
    def __init__(self, numeric):
        self.val = float(numeric)
        # True once the object is wired to write back into the structure
        self.__hasoutput__ = False
        self.output = {'nWB': [], 'nWS': [], 'R': [], 'C': []}

    def set_output(self, nWB, nWS, R, C):
        # Record the workbook/worksheet/row/column target cells.
        self.output.update(nWB=nWB, nWS=nWS, R=R, C=C)
        self.__hasoutput__ = True

    def __call__(self):
        return np.array(self.val, ndmin=2)
class RefObj(object):
    """A rectangular cell-range reference into the value structure."""
    def __init__(self, struct_ref, nWB, nWS, R, C):
        self.struct_ref = struct_ref
        # Range extent: peak-to-peak of the row/column indices, inclusive
        self.shape = [np.ptp(R) + 1, np.ptp(C) + 1]
        self.ref = [nWB, nWS, R, C]
        # True once the object is wired to write back into the structure
        self.__hasoutput__ = False
        self.output = {'nWB': [], 'nWS': [], 'R': [], 'C': []}

    def set_output(self, nWB, nWS, R, C):
        # Record the workbook/worksheet/row/column target cells.
        self.output.update(nWB=nWB, nWS=nWS, R=R, C=C)
        self.__hasoutput__ = True

    def __call__(self):
        nWB, nWS, R, C = self.ref
        rows = range(min(R), max(R) + 1)
        cols = range(min(C), max(C) + 1)
        flat = [self.struct_ref[nWB][nWS][r][c] for r in rows for c in cols]
        return np.reshape(flat, self.shape)
class FunObj(object):
    """Deferred call of a named spreadsheet function over parameter objects."""
    def __init__(self, funstr, params):
        self.funstr = funstr
        self.params = params
        # True once the object is wired to write back into the structure
        self.__hasoutput__ = False
        self.output = {'nWB': [], 'nWS': [], 'R': [], 'C': []}

    def set_output(self, nWB, nWS, R, C):
        # Record the workbook/worksheet/row/column target cells.
        self.output.update(nWB=nWB, nWS=nWS, R=R, C=C)
        self.__hasoutput__ = True

    def __call__(self):
        evaluated = [p() for p in self.params]
        result = Funlib_obj.fun_database[self.funstr](evaluated)
        return np.array(result, ndmin=2)
class CalcBlock(object):
    """An alternating operand/operator list, pre-ordered into an evaluation
    sequence that honours operator precedence.
    """
    def __init__(self, calc_block, formula0):
        self.calc_block = calc_block
        # A leading sign operand (e.g. "-A1") is folded into the first value
        if self.calc_block[0] in ['-', '+']:
            self.calc_block[1].val = Funlib_obj.fun_database[self.calc_block[0]]([0, self.calc_block[1].val])
            self.calc_block = self.calc_block[1:]
        self.formula = formula0
        self.sequence = []
        # True once the object is wired to write back into the structure
        self.__hasoutput__ = False
        self.__get_sequence()
        self.output = {'nWB': [], 'nWS': [], 'R': [], 'C': []}

    def __lambdify(self, operator, params):
        # Apply a binary operator from the shared function library.
        return Funlib_obj.fun_database[operator](params)

    def __rectify(self, vals, index0):
        # Drop the consumed operator/operand pair following position index0.
        return vals[0:index0+1]+vals[index0+3:]

    def __get_sequence(self):
        """Precompute [operator, [lhs, rhs], result-slot] triples in
        precedence order, so __call__ only replays them."""
        # Precedence classes, strongest binding first
        classes = [['^'], ['/', '*'], ['-', '+'], ['>', '<', '<=', '>=', '<>', '=']]
        # BUG FIX: this list of positions is mutated and concatenated below;
        # a Python 3 range object supports neither, so materialize a list.
        reference = list(range(len(self.calc_block)))
        operators = [self.calc_block[i] if i % 2 == 1 else '' for i in range(len(self.calc_block))]
        for ops in classes:
            for o in ops:
                while o in operators:
                    ref = operators.index(o)
                    references = [reference[ref - 1], reference[ref + 1]]
                    self.sequence += [[o, references, ref - 1]]
                    # Later positions shift left by two once this pair is gone
                    for i in range(ref + 2, len(reference)):
                        reference[i] -= 2
                    reference = reference[0:ref] + reference[ref + 2:]
                    operators = operators[0:ref] + operators[ref + 2:]

    def set_output(self, nWB, nWS, R, C):
        # Record the workbook/worksheet/row/column target cells.
        self.output['nWB'] = nWB
        self.output['nWS'] = nWS
        self.output['R'] = R
        self.output['C'] = C
        self.__hasoutput__ = True

    def __call__(self):
        if len(self.sequence) > 0:
            # Work on a copy so evaluated results never clobber the template
            vals = [c for c in self.calc_block]
            for seq in self.sequence:
                params = []
                for s in seq[1]:
                    # Calc objects are callables; plain values pass through
                    params.append(vals[s]() if hasattr(vals[s], '__call__') else vals[s])
                vals[seq[2]] = self.__lambdify(seq[0], params)
                vals = self.__rectify(vals, seq[2])
        else:
            vals = self.calc_block
        return vals[0]
class CalcHandler(object):
    """Executes calc objects against the value structure, either writing
    their results back (execute) or comparing them to the stored values
    (diagnose)."""
    def __init__(self, struct_ref, diagnose_threshold=1e-10):
        self.struct_ref = struct_ref
        # Maximum tolerated relative deviation before a cell is reported
        self.diagnose_threshold = diagnose_threshold

    def diagnose(self, obj):
        """Compare obj's computed values with the stored ones; return
        (all-within-threshold flag, list of deviating cells)."""
        if not obj.__hasoutput__:
            raise Exception("Object is not output'able'.")
        value = obj.__call__()
        output = []
        flag = True
        r0 = min(obj.output['R'])
        c0 = min(obj.output['C'])
        for r in range(r0, max(obj.output['R']) + 1):
            for c in range(c0, max(obj.output['C']) + 1):
                v0 = self.struct_ref[obj.output['nWB']][obj.output['nWS']][r][c]
                vf = value[r - r0][c - c0]
                deviation = np.divide(abs(v0 - vf), v0)
                if deviation > self.diagnose_threshold:
                    flag = False
                    print('Original:', v0, 'Calculated:', vf)
                    output.append([obj.output['nWB'], obj.output['nWS'], r, c, v0, vf, deviation])
        return flag, output

    def execute(self, obj):
        """Evaluate obj and write its result cells into the structure."""
        if not obj.__hasoutput__:
            raise Exception("Object is not output'able'.")
        value = obj.__call__()
        r0 = min(obj.output['R'])
        c0 = min(obj.output['C'])
        for r in range(r0, max(obj.output['R']) + 1):
            for c in range(c0, max(obj.output['C']) + 1):
                self.struct_ref[obj.output['nWB']][obj.output['nWS']][r][c] = float(value[r - r0][c - c0])
| |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This modules implements the engine for building packages in parallel"""
import os
import pkg_resources
from queue import Queue
import sys
import time
import traceback
import yaml
import asyncio
try:
from catkin_pkg.package import parse_package
from catkin_pkg.package import InvalidPackage
from catkin_pkg.packages import find_packages
from catkin_pkg.topological_order import topological_order_packages
except ImportError as e:
sys.exit(
'Importing "catkin_pkg" failed: %s\nMake sure that you have installed '
'"catkin_pkg", and that it is up to date and on the PYTHONPATH.' % e
)
from catkin_tools.common import FakeLock, expand_glob_package
from catkin_tools.common import format_time_delta
from catkin_tools.common import get_cached_recursive_build_depends_in_workspace
from catkin_tools.common import get_recursive_run_depends_in_workspace
from catkin_tools.common import log
from catkin_tools.common import wide_log
from catkin_tools.execution.controllers import ConsoleStatusController
from catkin_tools.execution.executor import execute_jobs
from catkin_tools.execution.executor import run_until_complete
from catkin_tools.jobs.catkin import create_catkin_build_job
from catkin_tools.jobs.catkin import create_catkin_clean_job
from catkin_tools.jobs.catkin import get_prebuild_package
from .color import clr
# Marker/ignore file names used to recognize catkin_tools-managed spaces.
BUILDSPACE_MARKER_FILE = '.catkin_tools.yaml'
# Presence of this file in a package directory excludes it from the build.
BUILDSPACE_IGNORE_FILE = 'CATKIN_IGNORE'
DEVELSPACE_MARKER_FILE = '.catkin_tools.yaml'
def determine_packages_to_be_built(packages, context, workspace_packages):
    """Returns list of packages which should be built, and those package's deps.

    NOTE(review): ``packages`` is extended *in place* (glob expansion and
    metapackage run-depends), so the caller's list is mutated by this call.

    :param packages: list of packages to be built, if None all packages are built
    :type packages: list
    :param context: Workspace context
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param workspace_packages: list of all packages in the workspace
    :type workspace_packages: list
    :returns: tuple of (packages to be built, their deps, all ordered packages)
    :rtype: tuple
    """
    start = time.time()
    # If there are no packages raise
    if not workspace_packages:
        log("[build] No packages were found in the source space '{0}'".format(context.source_space_abs))
    else:
        # NOTE(review): the packages were discovered by the caller, so the
        # elapsed time reported here only measures this function's startup.
        wide_log("[build] Found '{0}' packages in {1}."
                 .format(len(workspace_packages), format_time_delta(time.time() - start)))
    # Order the packages by topology
    ordered_packages = topological_order_packages(workspace_packages)
    # Set the packages in the workspace for the context
    context.packages = ordered_packages
    # Determine the packages which should be built
    packages_to_be_built = []
    packages_to_be_built_deps = []
    # Check if topological_order_packages determined any circular dependencies, if so print an error and fail.
    # If this is the case, the last entry of ordered packages is a tuple that starts with nil.
    if ordered_packages and ordered_packages[-1][0] is None:
        guilty_packages = ", ".join(ordered_packages[-1][1:])
        sys.exit("[build] Circular dependency detected in the following packages: {}".format(guilty_packages))
    # Map package name -> (path, package object) for quick lookup.
    workspace_package_names = dict([(pkg.name, (path, pkg)) for path, pkg in ordered_packages])
    # Determine the packages to be built
    if packages:
        # First assert all of the packages given are in the workspace
        # NOTE: extending/appending to ``packages`` while iterating it is
        # deliberate here — the for loop also visits the appended names.
        for package in packages:
            if package not in workspace_package_names:
                # Try whether package is a pattern and matches
                glob_packages = expand_glob_package(package, workspace_package_names)
                if len(glob_packages) > 0:
                    packages.extend(glob_packages)
                    continue
                else:
                    sys.exit("[build] Given package '{0}' is not in the workspace "
                             "and pattern does not match any package".format(package))
            # If metapackage, include run depends which are in the workspace
            package_obj = workspace_package_names[package][1]
            if 'metapackage' in [e.tagname for e in package_obj.exports]:
                for rdep in package_obj.run_depends:
                    if rdep.name in workspace_package_names:
                        packages.append(rdep.name)
        # Limit the packages to be built to just the provided packages
        for pkg_path, package in ordered_packages:
            if package.name in packages:
                packages_to_be_built.append((pkg_path, package))
                # Get the recursive dependencies for each of these packages
                pkg_deps = get_cached_recursive_build_depends_in_workspace(package, ordered_packages)
                packages_to_be_built_deps.extend(pkg_deps)
    else:
        # Only use buildlist when no other packages are specified
        if len(context.buildlist) > 0:
            # Expand glob patterns in buildlist
            buildlist = []
            for buildlisted_package in context.buildlist:
                buildlist.extend(expand_glob_package(buildlisted_package, workspace_package_names))
            packages_to_be_built = [p for p in ordered_packages if (p[1].name in buildlist)]
        else:
            packages_to_be_built = ordered_packages
    # Filter packages with skiplist
    # NOTE(review): if ``packages`` is None and a skiplist is configured,
    # the `pkg.name in packages` test below raises TypeError — callers
    # appear to always pass a list; confirm.
    if len(context.skiplist) > 0:
        # Expand glob patterns in skiplist
        skiplist = []
        for skiplisted_package in context.skiplist:
            skiplist.extend(expand_glob_package(skiplisted_package, workspace_package_names))
        # Apply skiplist to packages and dependencies
        # (explicitly requested packages are never filtered out)
        packages_to_be_built = [
            (path, pkg) for path, pkg in packages_to_be_built
            if (pkg.name not in skiplist or pkg.name in packages)]
        packages_to_be_built_deps = [
            (path, pkg) for path, pkg in packages_to_be_built_deps
            if (pkg.name not in skiplist or pkg.name in packages)]
    return packages_to_be_built, packages_to_be_built_deps, ordered_packages
def verify_start_with_option(start_with, packages, all_packages, packages_to_be_built):
    """Validate the --start-with package, exiting the process if invalid.

    :param start_with: package name given for --start-with, or None
    :param packages: package arguments given on the command line
    :param all_packages: (path, package) tuples for the whole workspace
    :param packages_to_be_built: (path, package) tuples scheduled for building
    :raises: SystemExit if start_with is not in the workspace or not scheduled
    """
    if start_with is None:
        return
    workspace_names = set(pkg.name for _, pkg in all_packages)
    if start_with not in workspace_names:
        sys.exit("Package given for --start-with, '{0}', is not in the workspace.".format(start_with))
    scheduled_names = set(pkg.name for _, pkg in packages_to_be_built)
    if start_with not in scheduled_names:
        sys.exit("Package given for --start-with, '{0}', "
                 "is in the workspace but would not be built with given package arguments: '{1}'"
                 .format(start_with, ' '.join(packages)))
def get_built_unbuilt_packages(context, workspace_packages):
    """Split the workspace into already-built and not-yet-built package names.

    :param context: workspace context providing the package metadata path
    :param workspace_packages: dict mapping paths to package objects
    :returns: tuple (set of built names, set of unbuilt names)
    """
    # A package with recorded metadata has been built at least once.
    built_packages = {
        pkg.name
        for _, pkg in find_packages(
            context.package_metadata_path(), warnings=[]).items()
    }
    # Everything else in the workspace is considered unbuilt.
    unbuilt_pkgs = {
        pkg.name
        for pkg in workspace_packages.values()
        if pkg.name not in built_packages
    }
    return built_packages, unbuilt_pkgs
def build_isolated_workspace(
    context,
    packages=None,
    start_with=None,
    no_deps=False,
    unbuilt=False,
    n_jobs=None,
    force_cmake=False,
    pre_clean=False,
    force_color=False,
    quiet=False,
    interleave_output=False,
    no_status=False,
    limit_status_rate=10.0,
    lock_install=False,
    no_notify=False,
    continue_on_failure=False,
    summarize_build=None,
):
    """Builds a catkin workspace in isolation

    This function will find all of the packages in the source space, start some
    executors, feed them packages to build based on dependencies and topological
    ordering, and then monitor the output of the executors, handling loggings of
    the builds, starting builds, failing builds, and finishing builds of
    packages, and handling the shutdown of the executors when appropriate.

    :param context: context in which to build the catkin workspace
    :type context: :py:class:`catkin_tools.verbs.catkin_build.context.Context`
    :param packages: list of packages to build, by default their dependencies will also be built
    :type packages: list
    :param start_with: package to start with, skipping all packages which proceed it in the topological order
    :type start_with: str
    :param no_deps: If True, the dependencies of packages will not be built first
    :type no_deps: bool
    :param unbuilt: Handle unbuilt packages
    :type unbuilt: bool
    :param n_jobs: number of parallel package build jobs
    :type n_jobs: int
    :param force_cmake: forces invocation of CMake if True, default is False
    :type force_cmake: bool
    :param pre_clean: Clean current build before building
    :type pre_clean: bool
    :param force_color: forces colored output even if terminal does not support it
    :type force_color: bool
    :param quiet: suppresses the output of commands unless there is an error
    :type quiet: bool
    :param interleave_output: prints the output of commands as they are received
    :type interleave_output: bool
    :param no_status: disables status bar
    :type no_status: bool
    :param limit_status_rate: rate to which status updates are limited; 0 places no limit (default: 10.0)
    :type limit_status_rate: float
    :param lock_install: causes executors to synchronize on access of install commands
    :type lock_install: bool
    :param no_notify: suppresses system notifications
    :type no_notify: bool
    :param continue_on_failure: do not stop building other jobs on error
    :type continue_on_failure: bool
    :param summarize_build: if True summarizes the build at the end, if None and continue_on_failure is True
        and the build fails, then the build will be summarized, but if False it never will be summarized.
    :type summarize_build: bool
    :raises: SystemExit if buildspace is a file or no packages were found in the source space
        or if the provided options are invalid
    """
    pre_start_time = time.time()

    # Assert that the limit_status_rate is valid
    if limit_status_rate < 0:
        sys.exit("[build] @!@{rf}Error:@| The value of --limit-status-rate must be greater than or equal to zero.")

    # Declare a buildspace marker describing the build config for error checking
    buildspace_marker_data = {
        'workspace': context.workspace,
        'profile': context.profile,
        'install': context.install,
        'install_space': context.install_space_abs,
        'devel_space': context.devel_space_abs,
        'source_space': context.source_space_abs}

    # Check build config
    if os.path.exists(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)):
        with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE)) as buildspace_marker_file:
            existing_buildspace_marker_data = yaml.safe_load(buildspace_marker_file)
        misconfig_lines = ''
        for (k, v) in existing_buildspace_marker_data.items():
            new_v = buildspace_marker_data.get(k, None)
            if new_v != v:
                misconfig_lines += (
                    '\n - %s: %s (stored) is not %s (commanded)' %
                    (k, v, new_v))
        if len(misconfig_lines) > 0:
            sys.exit(clr(
                "\n@{rf}Error:@| Attempting to build a catkin workspace using build space: "
                "\"%s\" but that build space's most recent configuration "
                "differs from the commanded one in ways which will cause "
                "problems. Fix the following options or use @{yf}`catkin "
                "clean -b`@| to remove the build space: %s" %
                (context.build_space_abs, misconfig_lines)))

    # Summarize the context
    summary_notes = []
    if force_cmake:
        summary_notes += [clr("@!@{cf}NOTE:@| Forcing CMake to run for each package.")]
    log(context.summary(summary_notes))

    # Make sure there is a build folder and it is not a file
    if os.path.exists(context.build_space_abs):
        if os.path.isfile(context.build_space_abs):
            sys.exit(clr(
                "[build] @{rf}Error:@| " +
                "Build space '{0}' exists but is a file and not a folder."
                .format(context.build_space_abs)))
    # If it doesn't exist, create it
    else:
        log("[build] Creating build space: '{0}'".format(context.build_space_abs))
        os.makedirs(context.build_space_abs)

    # Write the current build config for config error checking
    with open(os.path.join(context.build_space_abs, BUILDSPACE_MARKER_FILE), 'w') as buildspace_marker_file:
        buildspace_marker_file.write(yaml.dump(buildspace_marker_data, default_flow_style=False))

    # Get all the packages in the context source space
    # Suppress warnings since this is a utility function
    try:
        workspace_packages = find_packages(context.source_space_abs, exclude_subspaces=True, warnings=[])
    except InvalidPackage as ex:
        sys.exit(clr("@{rf}Error:@| The file %s is an invalid package.xml file."
                     " See below for details:\n\n%s" % (ex.package_path, ex.msg)))

    # Get packages which have not been built yet
    built_packages, unbuilt_pkgs = get_built_unbuilt_packages(context, workspace_packages)

    # Handle unbuilt packages
    if unbuilt:
        # Check if there are any unbuilt
        if len(unbuilt_pkgs) > 0:
            # Add the unbuilt packages
            packages.extend(list(unbuilt_pkgs))
        else:
            log("[build] No unbuilt packages to be built.")
            return

    # If no_deps is given, ensure packages to build are provided
    if no_deps and packages is None:
        log(clr("[build] @!@{rf}Error:@| With no_deps, you must specify packages to build."))
        return

    # Find list of packages in the workspace
    packages_to_be_built, packages_to_be_built_deps, all_packages = determine_packages_to_be_built(
        packages, context, workspace_packages)

    if not no_deps:
        # Extend packages to be built to include their deps
        packages_to_be_built.extend(packages_to_be_built_deps)
        # Also re-sort (the dict() call also de-duplicates entries)
        try:
            packages_to_be_built = topological_order_packages(dict(packages_to_be_built))
        except AttributeError:
            log(clr("[build] @!@{rf}Error:@| The workspace packages have a circular "
                    "dependency, and cannot be built. Please run `catkin list "
                    "--deps` to determine the problematic package(s)."))
            return

    # Check the number of packages to be built
    if len(packages_to_be_built) == 0:
        log(clr('[build] No packages to be built.'))

    # Assert start_with package is in the workspace
    verify_start_with_option(
        start_with,
        packages,
        all_packages,
        packages_to_be_built + packages_to_be_built_deps)

    # Populate .catkin file if we're not installing
    # NOTE: This is done to avoid the Catkin CMake code from doing it,
    # which isn't parallel-safe. Catkin CMake only modifies this file if
    # its package source path isn't found.
    if not context.install:
        dot_catkin_file_path = os.path.join(context.devel_space_abs, '.catkin')
        # If the file exists, get the current paths
        if os.path.exists(dot_catkin_file_path):
            # Use a context manager so the handle is closed promptly
            # (previously the open file was left to the garbage collector).
            with open(dot_catkin_file_path, 'r') as dot_catkin_file:
                dot_catkin_paths = dot_catkin_file.read().split(';')
        else:
            dot_catkin_paths = []

        # Update the list with the new packages (in topological order)
        packages_to_be_built_paths = [
            os.path.join(context.source_space_abs, path)
            for path, pkg in packages_to_be_built
        ]

        # Keep paths that are already registered or are about to be built.
        # (This previously re-joined already-absolute paths, which was a
        # no-op; the simplified form below is behaviorally identical.)
        new_dot_catkin_paths = [
            abs_path
            for abs_path in (
                os.path.join(context.source_space_abs, path) for path, pkg in all_packages)
            if abs_path in dot_catkin_paths or abs_path in packages_to_be_built_paths
        ]

        # Write the new file if it's different, otherwise, leave it alone
        if dot_catkin_paths == new_dot_catkin_paths:
            wide_log("[build] Package table is up to date.")
        else:
            wide_log("[build] Updating package table.")
            # Use a context manager here as well to avoid leaking the handle.
            with open(dot_catkin_file_path, 'w') as dot_catkin_file:
                dot_catkin_file.write(';'.join(new_dot_catkin_paths))

    # Remove packages before start_with
    if start_with is not None:
        for path, pkg in list(packages_to_be_built):
            if pkg.name != start_with:
                wide_log(clr("@!@{pf}Skipping@| @{gf}---@| @{cf}{}@|").format(pkg.name))
                packages_to_be_built.pop(0)
            else:
                break

    # Get the names of all packages to be built
    packages_to_be_built_names = [p.name for _, p in packages_to_be_built]
    packages_to_be_built_deps_names = [p.name for _, p in packages_to_be_built_deps]

    # Generate prebuild and prebuild clean jobs, if necessary
    prebuild_jobs = {}
    setup_util_present = os.path.exists(os.path.join(context.devel_space_abs, '_setup_util.py'))
    if context.install:
        setup_util_present &= os.path.exists(os.path.join(context.install_space_abs, '_setup_util.py'))
    catkin_present = 'catkin' in (packages_to_be_built_names + packages_to_be_built_deps_names)
    catkin_built = 'catkin' in built_packages
    prebuild_built = 'catkin_tools_prebuild' in built_packages

    # Handle the prebuild jobs if the develspace is linked
    prebuild_pkg_deps = []
    if context.link_devel:
        prebuild_pkg = None
        # Construct a dictionary to lookup catkin package by name
        pkg_dict = dict([(pkg.name, (pth, pkg)) for pth, pkg in all_packages])
        if setup_util_present:
            # Setup util is already there, determine if it needs to be
            # regenerated
            if catkin_built:
                if catkin_present:
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            elif prebuild_built:
                if catkin_present:
                    # TODO: Clean prebuild package
                    ct_prebuild_pkg_path = get_prebuild_package(
                        context.build_space_abs, context.devel_space_abs, force_cmake)
                    ct_prebuild_pkg = parse_package(ct_prebuild_pkg_path)

                    # Fixed typo: this key was previously misspelled as
                    # 'caktin_tools_prebuild'.
                    prebuild_jobs['catkin_tools_prebuild'] = create_catkin_clean_job(
                        context,
                        ct_prebuild_pkg,
                        ct_prebuild_pkg_path,
                        dependencies=[],
                        dry_run=False,
                        clean_build=True,
                        clean_devel=True,
                        clean_install=True)

                    # TODO: Build catkin package
                    prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
                    prebuild_pkg_deps.append('catkin_tools_prebuild')
            else:
                # How did these get here??
                log("Warning: devel space setup files have an unknown origin.")
        else:
            # Setup util needs to be generated
            if catkin_built or prebuild_built:
                log("Warning: generated devel space setup files have been deleted.")
            if catkin_present:
                # Build catkin package
                prebuild_pkg_path, prebuild_pkg = pkg_dict['catkin']
            else:
                # Generate and build explicit prebuild package
                prebuild_pkg_path = get_prebuild_package(context.build_space_abs, context.devel_space_abs, force_cmake)
                prebuild_pkg = parse_package(prebuild_pkg_path)

        if prebuild_pkg is not None:
            # Create the prebuild job
            prebuild_job = create_catkin_build_job(
                context,
                prebuild_pkg,
                prebuild_pkg_path,
                dependencies=prebuild_pkg_deps,
                force_cmake=force_cmake,
                pre_clean=pre_clean,
                prebuild=True)

            # Add the prebuild job
            prebuild_jobs[prebuild_job.jid] = prebuild_job

    # Remove prebuild jobs from normal job list
    for prebuild_jid, prebuild_job in prebuild_jobs.items():
        if prebuild_jid in packages_to_be_built_names:
            packages_to_be_built_names.remove(prebuild_jid)

    # Initial jobs list is just the prebuild jobs
    jobs = [] + list(prebuild_jobs.values())

    # Get all build type plugins
    build_job_creators = {
        ep.name: ep.load()['create_build_job']
        for ep in pkg_resources.iter_entry_points(group='catkin_tools.jobs')
    }

    # It's a problem if there aren't any build types available
    if len(build_job_creators) == 0:
        sys.exit('Error: No build types available. Please check your catkin_tools installation.')

    # Construct jobs
    for pkg_path, pkg in all_packages:
        if pkg.name not in packages_to_be_built_names:
            continue

        # Get actual build deps
        deps = [
            p.name for _, p
            in get_cached_recursive_build_depends_in_workspace(pkg, packages_to_be_built)
            if p.name not in prebuild_jobs
        ]
        # All jobs depend on the prebuild jobs if they're defined
        if not no_deps:
            for j in prebuild_jobs.values():
                deps.append(j.jid)

        # Determine the job parameters
        build_job_kwargs = dict(
            context=context,
            package=pkg,
            package_path=pkg_path,
            dependencies=deps,
            force_cmake=force_cmake,
            pre_clean=pre_clean)

        # Create the job based on the build type
        build_type = pkg.get_build_type()
        if build_type in build_job_creators:
            jobs.append(build_job_creators[build_type](**build_job_kwargs))
        else:
            wide_log(clr(
                "[build] @!@{yf}Warning:@| Skipping package `{}` because it "
                "has an unsupported package build type: `{}`"
            ).format(pkg.name, build_type))
            wide_log(clr("[build] Note: Available build types:"))
            for bt_name in build_job_creators.keys():
                wide_log(clr("[build] - `{}`".format(bt_name)))

    # Queue for communicating status
    event_queue = Queue()

    try:
        # Spin up status output thread
        status_thread = ConsoleStatusController(
            'build',
            ['package', 'packages'],
            jobs,
            n_jobs,
            [pkg.name for _, pkg in context.packages],
            [p for p in context.buildlist],
            [p for p in context.skiplist],
            event_queue,
            show_notifications=not no_notify,
            show_active_status=not no_status,
            show_buffered_stdout=not quiet and not interleave_output,
            show_buffered_stderr=not interleave_output,
            show_live_stdout=interleave_output,
            show_live_stderr=interleave_output,
            show_stage_events=not quiet,
            show_full_summary=(summarize_build is True),
            pre_start_time=pre_start_time,
            active_status_rate=limit_status_rate)
        status_thread.start()

        # Initialize locks
        locks = {
            'installspace': asyncio.Lock() if lock_install else FakeLock()
        }

        # Block while running N jobs asynchronously
        try:
            all_succeeded = run_until_complete(execute_jobs(
                'build',
                jobs,
                locks,
                event_queue,
                context.log_space_abs,
                max_toplevel_jobs=n_jobs,
                continue_on_failure=continue_on_failure,
                continue_without_deps=False))
        except Exception:
            status_thread.keep_running = False
            all_succeeded = False
            status_thread.join(1.0)
            wide_log(str(traceback.format_exc()))

        status_thread.join(1.0)

        # Warn user about new packages
        now_built_packages, now_unbuilt_pkgs = get_built_unbuilt_packages(context, workspace_packages)
        new_pkgs = [p for p in unbuilt_pkgs if p not in now_unbuilt_pkgs]
        if len(new_pkgs) > 0:
            log(clr("[build] @/@!Note:@| @/Workspace packages have changed, "
                    "please re-source setup files to use them.@|"))

        if all_succeeded:
            # Create isolated devel setup if necessary
            if context.isolate_devel:
                if not context.install:
                    _create_unmerged_devel_setup(context, now_unbuilt_pkgs)
                else:
                    _create_unmerged_devel_setup_for_install(context)
            return 0
        else:
            return 1

    except KeyboardInterrupt:
        wide_log("[build] Interrupted by user!")
        event_queue.put(None)
        return 130  # EOWNERDEAD return code is not part of the errno module.
def _create_unmerged_devel_setup(context, unbuilt):
# Find all of the leaf packages in the workspace
# where leaf means that nothing in the workspace depends on it
ordered_packages = context.packages
workspace_packages = dict([(p.name, p) for pth, p in ordered_packages])
# Get all packages which are dependencies of packages in the workspace which have been built
dependencies = set(sum([
[d.name for d in p.buildtool_depends + p.build_depends + p.run_depends]
for _, p in workspace_packages.items()
if p.name not in unbuilt
], []))
# Compute the packages on which no other packages depend
leaf_packages = [
pkg.name
for name, pkg in workspace_packages.items()
if pkg.name not in dependencies
]
leaf_paths = [
os.path.join(context.devel_space_abs, p, 'setup.sh')
for p in leaf_packages
]
leaf_sources = [
'. {}'.format(source_path)
for source_path in leaf_paths
if os.path.isfile(source_path)
]
# In addition to the leaf packages, we need to source the recursive run depends of the leaf packages
run_depends_packages = get_recursive_run_depends_in_workspace(
[workspace_packages[p] for p in leaf_packages], ordered_packages)
run_depends_paths = [
os.path.join(context.devel_space_abs, pth, 'setup.sh')
for pth, pkg in run_depends_packages
]
run_depends_sources = [
'. {}'.format(source_path)
for source_path in run_depends_paths
if os.path.isfile(source_path)
]
# Create the setup.sh file
setup_sh_path = os.path.join(context.devel_space_abs, 'setup.sh')
env_file = SETUP_SH_TEMPLATE.format(
first_source=leaf_sources[0],
leaf_sources='\n'.join(leaf_sources[1:]),
run_depends_sources='\n'.join(run_depends_sources)
)
with open(setup_sh_path, 'w') as f:
f.write(env_file)
# Create setup.bash file
setup_bash_path = os.path.join(context.devel_space_abs, 'setup.bash')
with open(setup_bash_path, 'w') as f:
f.write(SETUP_BASH_TEMPLATE)
# Create setup.zsh file
setup_zsh_path = os.path.join(context.devel_space_abs, 'setup.zsh')
with open(setup_zsh_path, 'w') as f:
f.write(SETUP_ZSH_TEMPLATE)
def _create_unmerged_devel_setup_for_install(context):
"""Create non-functioning placeholder scripts in develspace."""
for path in [os.path.join(context.devel_space_abs, f) for f in ['setup.sh', 'setup.bash', 'setup.zsh']]:
with open(path, 'w') as f:
f.write(SETUP_PLACEHOLDER_TEMPLATE)
SETUP_SH_TEMPLATE = """\
#!/usr/bin/env sh
# generated from within catkin_tools/verbs/catkin_build/build.py
# This file is aggregates the many setup.sh files in the various
# unmerged devel spaces in this folder.
# This is accomplished by sourcing each leaf package and all the
# recursive run dependencies of those leaf packages
# Source the first package's setup.sh without the --extend option
{first_source}
# remove all passed in args, resetting $@, $*, $#, $n
shift $#
# set the --extend arg for rest of the packages setup.sh's
set -- $@ "--extend"
# source setup.sh for each of the leaf packages in the workspace
{leaf_sources}
# And now the setup.sh for each of their recursive run dependencies
{run_depends_sources}
"""
SETUP_BASH_TEMPLATE = """\
#!/usr/bin/env bash
# generated from within catkin_tools/verbs/catkin_build/build.py
CATKIN_SHELL=bash
# source setup.sh from same directory as this file
_BUILD_SETUP_DIR=$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" && pwd)
. "$_BUILD_SETUP_DIR/setup.sh"
"""
SETUP_ZSH_TEMPLATE = """\
#!/usr/bin/env zsh
# generated from within catkin_tools/verbs/catkin_build/build.py
CATKIN_SHELL=zsh
# source setup.sh from same directory as this file
_BUILD_SETUP_DIR=$(builtin cd -q "`dirname "$0"`" && pwd)
emulate sh # emulate POSIX
. "$_BUILD_SETUP_DIR/setup.sh"
emulate zsh # back to zsh mode
"""
SETUP_PLACEHOLDER_TEMPLATE = """\
#!/usr/bin/env sh
# generated from within catkin_tools/verbs/catkin_build/build.py
echo "Error: This workspace was built with the '--install' option."
echo " You should source the setup files in the install space instead."
echo " Your environment has not been changed."
"""
# ---------------------------------------------------------------------------
# Copyright 2016 Mobile CSP Project. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Classes and methods to create and manage the Teacher Dashboard.
Based off of the announcements module, which was created by
saifu@google.com.
"""
# Credit every contributor in one assignment; the previous code assigned
# __author__ three times in a row, so only the last value was retained.
__author__ = ('Saifu Angto (saifu@google.com), '
              'ehiller@css.edu, '
              'Ralph Morelli (ram8647@gmail.com)')
import cgi
import datetime
import os
import urllib
import logging
import jinja2
import appengine_config
from common import tags
from common import utils as common_utils
from common import schema_fields
from common import jinja_utils
from controllers import utils
from models import resources_display
from models import custom_modules
from models import entities
from models import models
from models import roles
from models import transforms
from models import utils as models_utils
from models.models import MemcacheManager
from models.models import Student
from models.models import EventEntity
from modules.teacher import messages
from modules.dashboard import dashboard
from modules.oeditor import oeditor
from google.appengine.ext import db
from google.appengine.api import users
# Our modules classes
from course_entity import CourseSectionEntity
from course_entity import SectionItemRESTHandler
from teacher_entity import TeacherEntity
from teacher_entity import TeacherItemRESTHandler
from teacher_entity import TeacherRights
from student_activites import ActivityScoreParser
from student_answers import StudentAnswersEntity
# When True, enables the verbose logging.debug tracing scattered through
# this module (the '***RAM***' messages).
GLOBAL_DEBUG = False
# Module identity used for registration and template path construction.
MODULE_NAME = 'teacher'
MODULE_TITLE = 'Teacher Dashboard'
# Set up paths and directories for templates and resources.
RESOURCES_PATH = '/modules/teacher/resources'
TEMPLATE_DIR = os.path.join(
    appengine_config.BUNDLE_ROOT, 'modules', MODULE_NAME, 'templates')
# These are the module's templates. The first is the teacher's splash page.
TEACHERS_TEMPLATE = os.path.join(TEMPLATE_DIR, 'teacher_dashboard.html')
STUDENT_ROSTER_TEMPLATE = os.path.join(TEMPLATE_DIR, 'student_roster.html')
STUDENT_DASHBOARD_TEMPLATE = os.path.join(TEMPLATE_DIR, 'student_dashboard.html')
QUESTION_PREVIEW_TEMPLATE = os.path.join(TEMPLATE_DIR, 'question_preview.html')
class TeacherHandlerMixin(object):
    """Shared URL-building and template-formatting helpers for the teacher
    dashboard request handlers."""

    @staticmethod
    def _epoch_millis(date):
        # Convert a stored date to milliseconds since the Unix epoch, the
        # format the dashboard templates expect.
        moment = datetime.datetime.combine(date, datetime.time(0, 0, 0, 0))
        return (moment - datetime.datetime(1970, 1, 1)).total_seconds() * 1000

    def _build_action_url(self, base_url, action, key=None):
        # Canonicalize '<base_url>?action=<action>[&key=<key>]'.
        params = {'action': action}
        if key:
            params['key'] = key
        return self.canonicalize_url(
            '{}?{}'.format(base_url, urllib.urlencode(params)))

    def get_admin_action_url(self, action, key=None):
        """Build a canonical URL for an AdminDashboardHandler action."""
        return self._build_action_url(AdminDashboardHandler.URL, action, key=key)

    def get_dashboard_action_url(self, action, key=None):
        """Build a canonical URL for a TeacherDashboardHandler action."""
        return self._build_action_url(
            TeacherDashboardHandler.DASHBOARD_URL, action, key=key)

    def format_admin_template(self, items):
        """Build the template dict for the Admin 'Add Teacher' page.

        Clicking the 'Admin: Add Teacher' button opens a list of teachers
        plus an 'Add Teacher' button; this prepares that list and the
        associated edit/delete/add actions.
        """
        children = []
        for entity in items:
            item = transforms.entity_to_dict(entity)
            date = item.get('date')
            if date:
                item['date'] = self._epoch_millis(date)
            # Attach 'edit' and 'delete' actions where permitted.
            if TeacherRights.can_edit_section(self):
                item['edit_action'] = self.get_admin_action_url(
                    AdminDashboardHandler.ADMIN_EDIT_ACTION, key=item['key'])
                item['delete_xsrf_token'] = self.create_xsrf_token(
                    AdminDashboardHandler.ADMIN_DELETE_ACTION)
                item['delete_action'] = self.get_admin_action_url(
                    AdminDashboardHandler.ADMIN_DELETE_ACTION,
                    key=item['key'])
            children.append(item)
        output = {'children': children}
        # Actions for the 'Add Teacher' button.
        if TeacherRights.can_edit(self):
            output['add_xsrf_token'] = self.create_xsrf_token(
                AdminDashboardHandler.ADMIN_ADD_ACTION)
            output['add_action'] = self.get_admin_action_url(
                AdminDashboardHandler.ADMIN_ADD_ACTION)
        return output

    def format_dashboard_template(self, sections, user_email):
        """Build the template dict for the main Teacher Dashboard page.

        Registered teachers see a list of their course sections together
        with buttons to manage them; this assembles that list and the
        per-section and page-level actions.
        """
        template_sections = []
        if sections:
            for entity in sections:
                section = transforms.entity_to_dict(entity)
                date = section.get('date')
                if date:
                    section['date'] = self._epoch_millis(date)
                if GLOBAL_DEBUG:
                    logging.debug('***RAM*** format template section = ' + str(section))
                # Only the owning teacher (with edit rights) gets the
                # 'edit' and 'delete' actions for a section.
                if (section['teacher_email'] == user_email
                        and TeacherRights.can_edit_section(self)):
                    section['edit_action'] = self.get_dashboard_action_url(
                        TeacherDashboardHandler.EDIT_SECTION_ACTION,
                        key=section['key'])
                    section['delete_xsrf_token'] = self.create_xsrf_token(
                        TeacherDashboardHandler.DELETE_SECTION_ACTION)
                    section['delete_action'] = self.get_dashboard_action_url(
                        TeacherDashboardHandler.DELETE_SECTION_ACTION,
                        key=section['key'])
                template_sections.append(section)
        output = {'sections': template_sections}
        # Actions for the 'New Section' button.
        output['newsection_xsrf_token'] = self.create_xsrf_token(
            TeacherDashboardHandler.ADD_SECTION_ACTION)
        output['add_section'] = self.get_dashboard_action_url(
            TeacherDashboardHandler.ADD_SECTION_ACTION)
        # Actions for the 'Admin' button -- to add new teachers.
        if TeacherRights.can_edit(self):
            output['is_admin'] = True
            output['add_xsrf_token'] = self.create_xsrf_token(
                AdminDashboardHandler.ADMIN_LIST_ACTION)
            output['add_action'] = self.get_admin_action_url(
                AdminDashboardHandler.ADMIN_LIST_ACTION)
        return output
class TeacherDashboardHandler(
TeacherHandlerMixin, utils.BaseHandler,
utils.ReflectiveRequestHandler):
""" Handle all Teacher (non-Admin) functions for the Teacher Dashboard.
The Teacher functions include creating and deleting course sections,
adding and removing students from sections, and monitoring student
performance. The Admin functions consist solely of registering teachers
and are handled by AdminDashboardHandler.
"""
# Actions for the various Section functions
LIST_SECTION_ACTION = 'edit_sections'
EDIT_SECTION_ACTION = 'edit_section'
DELETE_SECTION_ACTION = 'delete_section'
ADD_SECTION_ACTION = 'add_section'
DISPLAY_ROSTER_ACTION = 'display_roster'
STUDENT_DASHBOARD_ACTION = 'student_dashboard'
PREVIEW_QUESTION = 'question_preview'
# The links for Teacher functions
DASHBOARD_LINK_URL = 'teacher'
DASHBOARD_URL = '/{}'.format(DASHBOARD_LINK_URL)
DASHBOARD_LIST_URL = '{}?action={}'.format(DASHBOARD_LINK_URL, LIST_SECTION_ACTION)
# Not sure what these do? May be expendable?
default_action = 'edit_sections'
get_actions = [default_action, LIST_SECTION_ACTION, EDIT_SECTION_ACTION,
ADD_SECTION_ACTION, DISPLAY_ROSTER_ACTION, STUDENT_DASHBOARD_ACTION, PREVIEW_QUESTION]
post_actions = [DELETE_SECTION_ACTION]
def is_registered_teacher(self, user_email):
"""Determines if current user is a registered teacher."""
items = TeacherEntity.get_teachers()
items = TeacherRights.apply_rights(self, items)
for teacher in items:
if GLOBAL_DEBUG:
logging.debug('***RAM*** teacher = ' + str(teacher.email))
logging.debug('***RAM*** user ' + str(users.User.email(user)))
if teacher.email == user_email:
return True
return False
def _render(self):
""" Renders the TEACHERS_TEMPLATE by calling super.render(template)
This assumes that the template's values are in template_value.
"""
self.template_value['navbar'] = {'teacher': True}
self.render(TEACHERS_TEMPLATE)
def _render_roster(self):
""" Renders the STUDENT_ROSTER_TEMPLATE by calling super.render(template)
This assumes that the template's values are in template_value.
"""
self.template_value['navbar'] = {'teacher': True}
self.render(STUDENT_ROSTER_TEMPLATE)
def _render_student_dashboard(self):
""" Renders the STUDENT_DASHBOARD_TEMPLATE by calling super.render(template)
This assumes that the template's values are in template_value.
"""
self.template_value['navbar'] = {'teacher': True}
self.render(STUDENT_DASHBOARD_TEMPLATE)
def render_page(self, template):
""" Renders the template that's supplied as an argument."""
self.template_value['navbar'] = {'teacher': True}
self.render(template)
def get_question_preview(self):
    """Serve a preview of one quiz question for the modal window.

    Invoked from the student dashboard; the result is displayed in a
    modal window initialized in modal-window.js.  Adapted from the
    dashboard module's question_preview, extended to support Quizly
    questions (identified by a non-empty 'url' request parameter).
    """
    self.template_value['navbar'] = {'teacher': True}
    self.template_value['resources_path'] = RESOURCES_PATH
    url = self.request.get('url')
    if url != '':
        # Quizly exercise: the template embeds it by URL.
        self.template_value['url'] = url
        self.template_value['question'] = 'Quizly'
    else:
        # Ordinary question: render the <question> tag for the quid.
        self.template_value['question'] = tags.html_to_safe_dom(
            '<question quid="{}">'.format(self.request.get('quid')), self)
    self.render(QUESTION_PREVIEW_TEMPLATE)
def get_edit_sections(self):
    """Display the Teacher Dashboard splash page with this teacher's sections.

    Automatically handles the 'edit_sections' action (and must therefore
    be named 'get_edit_sections').  Shown when the user clicks the navbar
    'Teachers' tab; from here a teacher manages all their sections.  Also
    hosts the 'Admin: Add Teacher' button (admins only), whose action is
    handled by AdminDashboardHandler.  Unregistered users are redirected
    to the main course page.
    """
    alerts = []
    user_email = ''
    disable = False
    current_user = users.get_current_user()
    if not current_user:
        alerts.append('Access denied. Only registered teachers can use this feature.')
        disable = True
    else:
        user_email = current_user.email()
        if not self.is_registered_teacher(user_email):
            alerts.append('Access denied. Please see a course admin.')
            disable = True
    if disable:
        # Not a registered teacher: bounce to the course page.
        self.redirect('/course')
        return
    sections = CourseSectionEntity.get_sections()
    sections = TeacherRights.apply_rights(self, sections)
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** Trace: get_edit_sections')
    # self._render will render the SECTIONS template.
    self.template_value['teacher'] = self.format_dashboard_template(sections, user_email)
    self.template_value['teacher_email'] = user_email
    self.template_value['alerts'] = alerts  # Not really used anymore
    self.template_value['disabled'] = disable  # Not really used anymore
    self._render()
def get_add_section(self):
    """Create a blank section entity and redirect to its editor.

    Triggered by the 'Create New Section' button on the Teacher splash
    page.  Responds 401 when the user lacks the add-section right.
    """
    if not TeacherRights.can_add_section(self):
        self.error(401)
        return
    if GLOBAL_DEBUG:
        logging.debug('***RAM** get_add_section')
    section = CourseSectionEntity.make('', '', '', True)
    section.put()
    self.redirect(self.get_dashboard_action_url(
        self.EDIT_SECTION_ACTION, key=section.key()))
def get_edit_section(self):
    """Show the object editor for the section identified by ?key=..."""
    key = self.request.get('key')
    schema = SectionItemRESTHandler.SCHEMA()
    exit_url = self.canonicalize_url('/{}'.format(self.DASHBOARD_LIST_URL))
    rest_url = self.canonicalize_url('/rest/section/item')
    delete_url = self._get_delete_url(
        SectionItemRESTHandler.URL, key, 'section-delete')
    form_html = oeditor.ObjectEditor.get_html_for(
        self,
        schema.get_json_schema(),
        schema.get_schema_dict(),
        key, rest_url, exit_url,
        delete_method='delete',
        delete_message='Are you sure you want to delete this section?',
        delete_url=delete_url,
        display_types=schema.get_display_types())
    if GLOBAL_DEBUG:
        logging.debug('***RAM** get_edit_section rendering page')
    self.template_value['main_content'] = form_html
    self._render()
def post_delete_section(self):
    """Delete the section identified by ?key=... and return to the list."""
    if not TeacherRights.can_delete_section(self):
        self.error(401)
        return
    if GLOBAL_DEBUG:
        logging.debug('***RAM** post_delete_section')
    section = CourseSectionEntity.get(self.request.get('key'))
    if section:
        section.delete()
    self.redirect('/{}'.format(self.DASHBOARD_LIST_URL))
def _get_delete_url(self, base_url, key, xsrf_token_name):
    """Build a delete URL carrying the entity key and an XSRF token."""
    token = cgi.escape(self.create_xsrf_token(xsrf_token_name))
    query = urllib.urlencode({'key': key, 'xsrf_token': token})
    return '{}?{}'.format(self.canonicalize_url(base_url), query)
def get_lessons_for_roster(self, units, course):
lessons = {}
for unit in units:
unit_lessons = course.get_lessons(unit.unit_id)
unit_lessons_filtered = []
for lesson in unit_lessons:
unit_lessons_filtered.append({
'title': lesson.title,
'unit_id': lesson.unit_id,
'lesson_id': lesson.lesson_id
})
lessons[unit.unit_id] = unit_lessons_filtered
# Convert to JSON
return transforms.dumps(lessons, {})
def calculate_lessons_progress(self, lessons_progress):
    """Summarize student progress on the lessons of one unit.

    Args:
        lessons_progress: dict keyed by lesson id; each value carries an
            'html' progress code (2=complete, 1=in progress, 0=unstarted).

    Returns:
        dict mapping str(lesson id) -> progress code, plus a 'progress'
        entry holding the percent of completed lessons as a string.
    """
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** lessons_progress ' + str(lessons_progress))
    lessons = {}
    completed = 0
    for key in lessons_progress:
        progress = lessons_progress[key]['html']
        if progress == 2:  # 2=complete, 1=in progress, 0=unstarted
            completed += 1
        lessons[str(key)] = progress
    # Bug fix: the original computed 'total / len(lessons) * 100' which,
    # under Python 2 integer division, truncates to 0 or 1 -- so the
    # percentage was always 0.0 or 100.0.  Use float division, and guard
    # against an empty progress dict (previously ZeroDivisionError).
    if lessons:
        percent = round(float(completed) / len(lessons) * 100, 2)
    else:
        percent = 0.0
    lessons['progress'] = str(percent)
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** calc lessons = ' + str(lessons))
    return lessons
def calculate_student_progress_data(self, student, course, tracker, units):
    """Summarize one student's progress for the course, units, and lessons.

    Returns:
        dict {'unit_completion': u, 'course_progress': c,
        'lessons_progress': p} where 'course_progress' is the overall
        percentage of lessons completed as calculated by GCB,
        'unit_completion' maps unit id -> GCB completion percentage, and
        'lessons_progress' maps unit id -> our per-lesson summary (see
        calculate_lessons_progress).
    """
    # Progress on each unit in the course -- a unit-id indexed dict.
    unit_progress_raw = tracker.get_unit_percent_complete(student)
    unit_progress_data = {}
    for key in unit_progress_raw:
        unit_progress_data[str(key)] = str(round(unit_progress_raw[key] * 100, 2))
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** unit_progress_data ' + str(unit_progress_data))
    # An object that summarizes student progress.
    student_progress = tracker.get_or_create_progress(student)
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** student_progress ' + str(student_progress))
    # Overall progress in the course -- a percent, rounded to 2 digits.
    course_progress = 0
    for value in unit_progress_raw.values():
        course_progress += value
    # Robustness fix: a course with no tracked units previously raised
    # ZeroDivisionError here.
    if unit_progress_data:
        course_progress = str(round(course_progress / len(unit_progress_data) * 100, 2))
    else:
        course_progress = '0.0'
    # Progress on each lesson in the course -- keyed by (unitid, lessonid).
    units_lessons_progress = {}
    for unit in units:
        if GLOBAL_DEBUG:
            logging.debug('***RAM*** unit = ' + str(unit.unit_id))
        # Don't show assessments that are part of a unit.
        if course.get_parent_unit(unit.unit_id):
            continue
        if unit.unit_id in unit_progress_raw:
            lessons_progress = tracker.get_lesson_progress(
                student, unit.unit_id, student_progress)
            if GLOBAL_DEBUG:
                logging.debug('***RAM*** lesson_status = ' + str(lessons_progress))
            units_lessons_progress[str(unit.unit_id)] = \
                self.calculate_lessons_progress(lessons_progress)
    return {'unit_completion': unit_progress_data,
            'course_progress': course_progress,
            'lessons_progress': units_lessons_progress}
def retrieve_student_scores_and_attempts(self, student_email, course):
    """Fetch activity scores and attempt data for one student.

    Returns whatever ActivityScoreParser.get_activity_scores produces for
    the student's user id (keys 'scores' and 'attempts' are consumed by
    create_student_table).
    """
    # get_first_by_email returns a tuple; the Student is its first element.
    student = Student.get_first_by_email(student_email)[0]
    # Idiom fix: removed a dead 'scores = {}' initializer that was
    # immediately overwritten by the call below.
    scores = ActivityScoreParser.get_activity_scores([student.user_id], course, True)
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** get activity scores ' + str(scores))
    return scores
def calculate_performance_ratio(self, aggregate_scores, email):
    """Annotate each lesson's scores with a 'ratio' of correct answers.

    Args:
        aggregate_scores: dict keyed by student email; each value maps
            unit -> lesson -> question -> {'score': number, ...}.
        email: the student whose scores should be annotated.

    Returns:
        The student's scores dict with a 'ratio' entry ("correct/total")
        added to every lesson, or aggregate_scores unchanged when the
        email has no entry.
    """
    # Idiom fix: membership test directly on the dict instead of .keys().
    if email not in aggregate_scores:
        return aggregate_scores
    scores = aggregate_scores[email]
    for unit in scores:
        for lesson in scores[unit]:
            n_questions = 0
            n_correct = 0
            for quest in scores[unit][lesson]:
                n_questions += 1
                n_correct += scores[unit][lesson][quest]['score']
            scores[unit][lesson]['ratio'] = str(n_correct) + "/" + str(n_questions)
    return scores
def create_student_table(self, email, course, tracker, units, get_scores=False):
    """Build a dict describing one student: name, email, progress, scores.

    Returns an empty dict when no Student record matches *email*.
    """
    student_dict = {}
    # get_first_by_email returns a tuple; the Student is its first element.
    student = Student.get_first_by_email(email)[0]
    if student:
        progress = self.calculate_student_progress_data(
            student, course, tracker, units)
        if get_scores:
            scores = self.retrieve_student_scores_and_attempts(email, course)
            student_dict['attempts'] = scores['attempts']
            student_dict['scores'] = self.calculate_performance_ratio(
                scores['scores'], email)
        student_dict['name'] = student.name
        student_dict['email'] = student.email
        student_dict['progress_dict'] = progress
        student_dict['has_scores'] = get_scores
    return student_dict
def create_student_data_table(self, course, section, tracker, units, student_email=None):
    """Build progress data for a whole section, or for one student.

    When *student_email* is given (get_student_dashboard), return a single
    student dict including scores; otherwise return a list of dicts, one
    per student enrolled in *section*.
    """
    # Single-student mode, used by get_student_dashboard.
    if student_email:
        return self.create_student_table(
            student_email, course, tracker, units, get_scores=True)
    # section.students is a comma-delimited string of emails.
    emails = section.students.split(',') if section.students else []
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** students index : ' + str(emails))
    students = []
    for email in emails:
        student_dict = self.create_student_table(
            email, course, tracker, units, get_scores=False)
        if student_dict:
            students.append(student_dict)
    return students
def get_display_roster(self):
    """Callback that displays the Roster view for one course section.

    Reached via the 'View Roster' button on the main Teacher Dashboard
    page.  Shows every student in the section and their progress in the
    course, and lets the teacher manage the section.
    """
    key = self.request.get('key')
    section = CourseSectionEntity.get(key)
    course = self.get_course()
    # Get a progress tracker for the course.
    tracker = course.get_progress_tracker()
    # Keep only real units; drop assessments.
    units = [unit for unit in course.get_units() if unit.type == 'U']
    # And lessons.
    lessons = self.get_lessons_for_roster(units, course)
    # Students and progress data for this section.
    students = self.create_student_data_table(course, section, tracker, units)
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** Units : ' + str(units))
        logging.debug('***RAM*** Lessons : ' + str(lessons))
        logging.debug('***RAM*** Students : ' + str(students))
    user_email = users.get_current_user().email()
    self.template_value['resources_path'] = RESOURCES_PATH
    self.template_value['section'] = {
        'key': key,
        'teacher': user_email,
        'name': section.name,
        'description': section.description,
    }
    self.template_value['units'] = units
    self.template_value['lessons'] = lessons
    self.template_value['students'] = students
    # JSON copy for use with javascript.
    self.template_value['students_json'] = transforms.dumps(students, {})
    self._render_roster()
def get_student_dashboard(self):
    """Callback that displays details of one student's performance.

    Reached via the 'View Dashboard' button on the Section Roster page;
    shows details for all units and lessons.
    """
    student_email = self.request.get('student')
    course = self.get_course()
    tracker = course.get_progress_tracker()
    # Keep only real units; drop assessments.
    units = [unit for unit in course.get_units() if unit.type == 'U']
    self.template_value['student_email'] = student_email
    self.template_value['units'] = units
    self.template_value['lessons'] = self.get_lessons_for_roster(units, course)
    student_dict = self.create_student_data_table(
        course, None, tracker, units, student_email)
    if GLOBAL_DEBUG:
        logging.debug('***RAM*** Student : ' + str(student_dict))
    self.template_value['student'] = student_dict
    # JSON copy for use with javascript.
    self.template_value['studentJs'] = transforms.dumps(student_dict, {})
    self._render_student_dashboard()
class AdminDashboardHandler(TeacherHandlerMixin, dashboard.DashboardHandler):
    """ Handler for all Admin functions, which basically consists of giving teachers
    access to the Teacher Dashboard.

    This is a subclass of DashboardHandler, so it comes with functionality that
    is available to other Handlers, mainly in how pages are rendered.
    DashboardHandler has a render_page method that is not available in other
    handlers.
    """
    # The various Admin Actions
    ADMIN_LIST_ACTION = 'edit_teachers'
    ADMIN_EDIT_ACTION = 'edit_teacher'
    ADMIN_DELETE_ACTION = 'delete_teacher'
    ADMIN_ADD_ACTION = 'add_teacher'
    # '?action=' values accepted for GET and POST; DashboardHandler routes
    # them to the get_<action>/post_<action> methods below.
    get_actions = [ADMIN_EDIT_ACTION, ADMIN_LIST_ACTION]
    post_actions = [ADMIN_ADD_ACTION, ADMIN_DELETE_ACTION]
    ADMIN_LINK_URL = 'mcsp_admin'
    URL = '/{}'.format(ADMIN_LINK_URL)
    ADMIN_LIST_URL = '{}?action={}'.format(ADMIN_LINK_URL, ADMIN_LIST_ACTION)

    @classmethod
    def get_child_routes(cls):
        """ Add child handlers for REST. The REST handlers perform
        retrieve and store teachers, sections, and other data
        used by the Teacher Dashboard.
        """
        if GLOBAL_DEBUG:
            logging.debug('***RAM** get_child_routes')
        return [
            (TeacherItemRESTHandler.URL, TeacherItemRESTHandler),
            (SectionItemRESTHandler.URL, SectionItemRESTHandler)
        ]

    def get_edit_teachers(self):
        """ Displays a list of registered teachers.

        This is the splash page for Admin users of Teacher Dashboard.
        It is reached by clicking the 'Admin: Add Teacher' button in
        the Teacher Dashboard splash page. From this page Admins can
        perform all tasks associated with registering teachers.
        """
        items = TeacherEntity.get_teachers()
        # Filter the list down to what this user may see.
        items = TeacherRights.apply_rights(self, items)
        if GLOBAL_DEBUG:
            logging.debug('***RAM** Trace: get_edit_teachers')
        main_content = self.get_template(
            'mcsp_admin_dashboard.html', [TEMPLATE_DIR]).render({
                'teachers': self.format_admin_template(items),
            })
        self.render_page({
            'page_title': self.format_title('Teachers'),
            'main_content': jinja2.utils.Markup(main_content)})

    def get_edit_teacher(self):
        """Shows an editor for the teacher identified by ?key=..."""
        key = self.request.get('key')
        schema = TeacherItemRESTHandler.SCHEMA()
        exit_url = self.canonicalize_url('/{}'.format(self.ADMIN_LIST_URL))
        rest_url = self.canonicalize_url('/rest/teacher/item')
        form_html = oeditor.ObjectEditor.get_html_for(
            self,
            schema.get_json_schema(),
            schema.get_schema_dict(),
            key, rest_url, exit_url,
            delete_method='delete',
            delete_message='Are you sure you want to delete this teacher?',
            delete_url=self._get_delete_url(
                TeacherItemRESTHandler.URL, key, 'teacher-delete'),
            display_types=schema.get_display_types())
        if GLOBAL_DEBUG:
            logging.debug('***RAM** get_edit_teacher rendering page')
        self.render_page({
            'main_content': form_html,
            'page_title': 'Edit Teacher',
        }, in_action=self.ADMIN_LIST_ACTION)

    def post_delete_teacher(self):
        """Deletes the teacher identified by ?key=... (requires the right)."""
        if not TeacherRights.can_delete(self):
            self.error(401)
            return
        if GLOBAL_DEBUG:
            logging.debug('***RAM** post_delete_teacher')
        key = self.request.get('key')
        entity = TeacherEntity.get(key)
        if entity:
            entity.delete()
        self.redirect('/{}'.format(self.ADMIN_LIST_URL))

    def post_add_teacher(self):
        """Adds a new blank teacher and redirects to an editor for it."""
        if not TeacherRights.can_add(self):
            self.error(401)
            return
        if GLOBAL_DEBUG:
            logging.debug('***RAM** post_add_teacher')
        entity = TeacherEntity.make('', '', '')
        entity.put()
        self.redirect(self.get_admin_action_url(
            self.ADMIN_EDIT_ACTION, key=entity.key()))

    def _get_delete_url(self, base_url, key, xsrf_token_name):
        """Builds a delete URL carrying the entity key and an XSRF token."""
        return '%s?%s' % (
            self.canonicalize_url(base_url),
            urllib.urlencode({
                'key': key,
                'xsrf_token': cgi.escape(
                    self.create_xsrf_token(xsrf_token_name)),
            }))
def record_tag_assessment(source, user, data):
    """ Callback function when student attempts a quiz question.

    A tag-assessment event is a submittal of an answer
    for a lesson question or quizly exercise. The data
    dict has some of the information that is needed to
    construct a performance profile on these answers for
    this user.  Events from other sources are ignored.
    """
    if source == 'tag-assessment':
        StudentAnswersEntity.record(user, data)
        # NOTE(review): indentation was ambiguous in the reviewed copy;
        # the debug log is assumed to apply only to handled events.
        if GLOBAL_DEBUG:
            logging.debug('***RAM*** data = ' + str(data))
def notify_module_enabled():
    """Handles things after module has been enabled.

    Adding an event listener to EventEntity lets us record
    student activity on questions and quizly exercises in
    our own database as they occur.

    TODO: Try to measure the cost of storing this extra data.
    """
    EventEntity.EVENT_LISTENERS.append(record_tag_assessment)
# Module instance; populated by register_module() below.
custom_module = None

def register_module():
    """Registers this module in the registry."""
    # Routes served inside a course namespace.
    handlers = [
        (AdminDashboardHandler.URL, AdminDashboardHandler),
        (TeacherDashboardHandler.DASHBOARD_URL, TeacherDashboardHandler)
    ]
    # These are necessary to access the js and css resources.
    global_routes = [
        (RESOURCES_PATH + '/js/modal-window.js', tags.ResourcesHandler),
        (RESOURCES_PATH + '/js/tipped.js', tags.ResourcesHandler),
        (RESOURCES_PATH + '/css/question_preview.css', tags.ResourcesHandler),
        (RESOURCES_PATH + '/css/student_progress.css', tags.ResourcesHandler),
        (RESOURCES_PATH + '/css/tipped.css', tags.ResourcesHandler),
    ]
    # Add this module's entry point to the dashboard's 'analytics' sub-nav.
    dashboard.DashboardHandler.add_sub_nav_mapping(
        'analytics', MODULE_NAME, MODULE_TITLE,
        action=AdminDashboardHandler.ADMIN_LIST_ACTION,
        href=AdminDashboardHandler.ADMIN_LIST_URL,
        placement=1000, sub_group_name='pinned')
    global custom_module  # pylint: disable=global-statement
    custom_module = custom_modules.Module(
        MODULE_TITLE,
        'A set of pages for managing course teachers.',
        global_routes, handlers,
        notify_module_enabled=notify_module_enabled)
    return custom_module
| |
#!/usr/bin/env python
""" Main program to run pySPACE
For further instructions take a look at the pySPACE documentation and the tutorials
in there!
.. note::
Due to errors in configuration files, data or the software, the software may
crash. Because of internal parallelization and threading, it is currently
not possible to use ''ctrl + c''. So you should kill the processes manually
e.g.::
ctrl + z
kill -9 %1
fg
**Profiling**
For profiling the software you should use the option ``--profile``,
when running pySPACE. Furthermore you should use the
:class:`~pySPACE.environments.backends.serial.SerialBackend`.
Otherwise the started subprocesses can not be examined.
The result is called `profile.pstat` and saved in your result folder.
For getting a print out of this profiling, you can use gprof2dot.py in the
library folder. (partial) Copy from the documentation:
General usage::
python gprof2dot.py -f pstats profiling_file -r pySPACE | dot -Tpng -o output.png
or simple usage in result folder, when pyspace is on same level as general
storage folder::
python ../../../pyspace/pySPACE/tools/gprof2dot.py -f pstats profile.pstat | dot -Tpng -o output.png
where profiling_file is the file that is generated by the cProfile module
and output.png is the filename of the resulting picture.
The option '-r', '--restrict' is there to eliminate functions in the
profiling, that do not contain this string in
their path name [default: None].
.. note:: For creating the graphic, which is done with the 'dot' command,
the GraphViz package needs to be installed.
"""
# general imports
import os
import shutil
import sys
import logging
from optparse import OptionParser
import cProfile
import yaml
import warnings
# adding pySPACE to system path for import
file_path = os.path.dirname(os.path.realpath(__file__))
# Parent directory containing the 'pySPACE' tree.
pyspace_path = file_path[:file_path.rfind('pySPACE')-1]
if not pyspace_path in sys.path:
    sys.path.append(pyspace_path)
import pySPACE
# Warn when a different pySPACE installation shadows the expected one.
import_path = os.path.realpath(os.path.join(os.path.dirname(pySPACE.__file__),
                                            os.path.pardir))
if not import_path == pyspace_path:
    warnings.warn("Check your Python path! "+
                  "'%s' is the expected pySPACE path," % pyspace_path +
                  " but '%s' is used." % import_path)
# pySPACE imports
from pySPACE.missions.operations.base import Operation, create_operation_from_file
from pySPACE.tools.filesystem import get_relative_path, create_source_archive
from pySPACE import create_backend
from pySPACE.environments.chains.operation_chain import create_operation_chain
from pySPACE.environments.big_bang import LOGGER
#import matplotlib
#matplotlib.use("MacOSX") #MacOSX")
class LaunchParser(OptionParser):
    """OptionParser variant that prints its epilog verbatim.

    optparse normally re-wraps the epilog text; this subclass preserves
    the author's formatting.  Technique from:
    http://stackoverflow.com/questions/5961160/displaying-newlines-in-the-help-message-when-using-pythons-optparse
    """

    def format_epilog(self, formatter):
        """Return the epilog unchanged instead of re-wrapping it."""
        return self.epilog
epilog=\
"""
This is the script to launch pySPACE.
For detailed documentation on pySPACE refer to the online documentation at
http://pyspace.github.io/pyspace/index.html,
the __init__ file in the pySPACE folder, or the index.rst in the docs folder.
This script shall start pySPACE in the standard benchmark flow.
If you used the setup.py before or another installation program
all relevant files should be found in the folder `pySPACEcenter`
in your home directory. Otherwise it will be searched for in your
`PYSPACE_CONF_DIR`.
The main configuration is specified in the <config.yaml>. If you run pySPACE
for the first time, have a look at it and the therein specified environment
parameters.
Due to errors in configuration files, data or the software, the software may
crash. Because of internal parallelization and threading, it is currently
not possible to use ''ctrl + c''. So you should kill the processes manually
e.g.::
ctrl + z
kill -9 %1
fg
"""
def run_operation(default_backend, operation, ex_timeout=1e6, re_timeout=1e6):
    """ Runs the given operation on the backend

    Runs *operation* either on the backend named in the operation's spec
    file or, when none is named, on *default_backend*.

    Different timeouts are required, because for the execute function get is
    called which does not accept too high timeouts without proper error
    handling on Mac OS X, whereas Linux systems are fine with larger timeouts.

    Returns the operation's output directory (and always cleans the backend
    up, even on failure).
    """
    # Decide which backend actually runs this operation.
    spec = operation.operation_spec
    if "backend" in spec:
        backend = create_backend(spec["backend"])
        LOGGER.info(" --> For current operation using backend: \n\t\t %s."%str(backend))
    else:
        backend = default_backend
    # In case an operation chain is executed the queue needs to be reset,
    # since the first terminated operation cleans and closes the queue.
    if str(backend) == "MulticoreBackend":
        backend.reset_queue()
    backend.stage_in(operation)
    try:
        backend.execute(timeout=ex_timeout)
        backend.retrieve(timeout=re_timeout)
        backend.consolidate()
        return operation.get_output_directory()
    finally:
        backend.cleanup()
def run_operation_chain(default_backend, operation_chain):
    """ Runs the given operation chain on the backend

    Runs the given operation chain *operation_chain* on the backend passed
    as *default_backend*.  Each operation's output directory becomes the
    input path of the next operation in the chain.

    .. todo:: document override mode here and in tutorial
    .. todo:: documentation needed for prepare operation and hidden params
    .. todo:: parameter settings missing instead of parameter ranges?
    """
    base_result_dir = operation_chain.get_output_directory()
    input_path = operation_chain["input_path"]
    # Optional operation executed once before the chain proper.
    prepare_operation = operation_chain["prepare_operation"] \
        if "prepare_operation" in operation_chain else None
    operations = operation_chain["operations"]
    runs = operation_chain["runs"] if "runs" in operation_chain else 1
    # Run prepare operation if requested
    if prepare_operation is not None:
        LOGGER.info("Running prepare operation of the operation chain")
        # Create operation object for specified prepare operation
        operation = create_operation_from_file(prepare_operation,
                                               base_result_dir)
        output_directory = run_operation(default_backend, operation)
        # Rename output_directory so later operations can reference it via
        # the __PREPARE_OPERATION__ pseudo parameter (see below).
        preparation_directory = os.sep.join(output_directory.split(os.sep)[:-1]) + \
            os.sep + "prepare_operation"
        shutil.move(output_directory, preparation_directory)
    # Execute all operations of the operation chain sequentially
    for index, operation in enumerate(operations):
        overridden_params_dict = {}
        if isinstance(operation, str):
            op_spec_relative_filename = operation
        else:  # it should be a dictionary...
            if 'operation_spec' in operation:
                op_spec_relative_filename = operation['operation_spec']
            else:
                op_spec_relative_filename = None
            try:
                overridden_params_dict = operation["overridden_params"]
            except KeyError:
                pass
        if op_spec_relative_filename is not None:
            LOGGER.info("Running operation %s of the operation chain (%s/%s)" % \
                (op_spec_relative_filename, index + 1, len(operations)))
            spec_file_name = os.path.join(pySPACE.configuration.spec_dir,
                                          "operations",
                                          op_spec_relative_filename)
            # Bug fix: the original passed an anonymous open() handle to
            # yaml.load and never closed it; use a context manager.
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors -- spec files are trusted here, but
            # consider yaml.safe_load if that assumption changes.
            with open(spec_file_name, "r") as spec_file:
                operation_spec = yaml.load(spec_file)
        else:
            # we expect to get everything from overridden params
            operation_spec = {}
            try:
                operation_name = overridden_params_dict['operation_name']
            except KeyError:
                operation_name = "<unnamed>"
            LOGGER.info("Running operation %s of the operation chain (%s/%s)" % \
                (operation_name, index + 1, len(operations)))
        operation_spec["input_path"] = input_path
        operation_spec["runs"] = runs
        # Add pseudo parameter "__PREPARE_OPERATION__" to parameter ranges
        # if there was a prepare operation
        if prepare_operation is not None:
            if not "parameter_ranges" in operation_spec:
                operation_spec["parameter_ranges"] = {}
            operation_spec["parameter_ranges"]["__PREPARE_OPERATION__"] = [preparation_directory]
            if not "hide_parameters" in operation_spec:
                operation_spec["hide_parameters"] = []
            operation_spec["hide_parameters"].append("__PREPARE_OPERATION__")
        # override params with any explicitly specified params in the
        # operation chain spec.
        operation_spec.update(overridden_params_dict)
        # use the operation factory method to create operation
        operation = Operation.create(operation_spec,
                                     base_result_dir=base_result_dir)
        # Run the operation
        output_directory = run_operation(default_backend, operation)
        # The output acts as input for the next operation of the chain
        input_path = get_relative_path(pySPACE.configuration.storage,
                                       output_directory)
def main():
    """Parse the command line, build a backend, and run the operation (chain)."""
    #### Find pySPACE package and import it ####
    # Determine path of current file
    path = os.path.realpath(__file__)
    # Move up to parent directory that contains the pySPACE tree
    suffix = []
    for i in range(3):
        path, tail = os.path.split(path)
        suffix.append(tail)
    parent_dir = path
    # Check proper directory structure
    if suffix != ['launch.py', 'run', 'pySPACE']:
        raise RuntimeError, "Encountered incorrect directory structure. "\
            "launch.py needs to reside in $PARENT_DIR/pySPACE/run"
    # Workaround for eegserver crashing after 255 open ports
    # - Now it crashes after 4096 open ports ;-)
    #import resource
    #(fd1, fd2) = resource.getrlimit(resource.RLIMIT_NOFILE)
    #fd1 = 4096 if fd2 == resource.RLIM_INFINITY else fd2-1
    #resource.setrlimit(resource.RLIMIT_NOFILE, (fd1,fd2))
    # ------------------------------------------------------
    #########################################
    ### Parsing of command line arguments
    usage = "Usage: %prog [BACKEND_SPECIFICATION] [--config <conf.yaml>] "\
        "[--operation <operation.yaml> | --operation_chain <operation_chain.yaml>] "\
        "[--profile]"\
        " where BACKEND_SPECIFICATION can be --serial, --mcore, --loadl or --mpi"
    parser = LaunchParser(usage=usage, epilog=epilog)
    # Configuration
    parser.add_option("-c", "--configuration",
                      default="config.yaml",
                      help="Choose the configuration file, which is looked up in PYSPACE_CONF_DIR",
                      action="store")
    # Backends
    parser.add_option("-s", "--serial", action="store_true", default=False,
                      help="Enables execution on the SerialBackend (one local process)")
    parser.add_option("-m", "--mcore", action="store_true", default=False,
                      help="Enables execution on the MulticoreBackend (one process per CPU core)")
    parser.add_option("-l", "--local", action="store_true", default=False,
                      help="Enables execution on the MulticoreBackend (one process per CPU core)")
    parser.add_option("-i", "--mpi", action="store_true", default=False,
                      help="Enables execution via MPI")
    parser.add_option("-L", "--loadl", action="store_true", default=False,
                      help="Enables execution via LoadLeveler.")
    # Operation / operation chain
    parser.add_option("-o", "--operation",
                      help="Chooses the operation that will be executed. The "
                           "operation specification file is looked up in "
                           "$SPEC_DIR/operations",
                      action="store")
    parser.add_option("-O", "-C", "--operation_chain",
                      help="Chooses the operation chain that will be executed. "
                           "The operation chain specification file is looked up "
                           "in $SPEC_DIR/operation_chains",
                      action="store")
    # Profiling
    parser.add_option("-p", "--profile",
                      help="Profiles execution.",
                      action="store_true", default=False,)
    (options, args) = parser.parse_args()
    # Load configuration file
    pySPACE.load_configuration(options.configuration)
    if hasattr(pySPACE.configuration, "eeg_acquisition_dir"):
        eeg_parent_dir =\
            os.sep.join(pySPACE.configuration.eeg_acquisition_dir.split(os.sep)[:-1])
        # NOTE(review): this inner test repeats the outer hasattr check and
        # is therefore always False here, so eeg_module_path is never set
        # on this branch -- it likely should test "eeg_module_path".
        if not hasattr(pySPACE.configuration, "eeg_acquisition_dir"):
            pySPACE.configuration.eeg_module_path = eeg_parent_dir
    else:
        # Fall back to an 'eeg_modules' directory next to the pySPACE tree.
        eeg_parent_dir, tail = os.path.split(parent_dir)
        eeg_parent_dir = os.path.join(eeg_parent_dir, "eeg_modules")
        pySPACE.configuration.eeg_module_path = eeg_parent_dir
    sys.path.append(eeg_parent_dir)
    # Create backend
    if options.serial:
        default_backend = create_backend("serial")
    elif options.mcore or options.local:
        default_backend = create_backend("mcore")
    elif options.mpi:
        default_backend = create_backend("mpi")
    elif options.loadl:
        default_backend = create_backend("loadl")
    else: # Falling back to serial backend
        default_backend = create_backend("serial")
    LOGGER.info(" --> Using backend: \n\t\t %s."%str(default_backend))
    if not options.operation is None:
        # Create operation for the given name
        operation = create_operation_from_file(options.operation)
        # Store current source code for later inspection
        create_source_archive(archive_path=operation.get_output_directory())
        if not options.profile:
            # Execute the current operation
            run_operation(default_backend, operation)
        else:
            # Execute and profile operation
            cProfile.runctx('pySPACE.run_operation(default_backend, operation)',
                            globals(), locals(),
                            filename = operation.get_output_directory()\
                                + os.sep + "profile.pstat")
    elif not options.operation_chain is None:
        # Create operation chain for the given name
        operation_chain = create_operation_chain(options.operation_chain)
        # Store current source code for later inspection
        create_source_archive(archive_path=operation_chain.get_output_directory())
        if not options.profile:
            # Execute the current operation_chain
            run_operation_chain(default_backend, operation_chain)
        else:
            # Execute and profile operation
            cProfile.runctx('pySPACE.run_operation_chain(default_backend, operation_chain)',
                            globals(), locals(),
                            filename=operation_chain.get_output_directory()\
                                + os.sep + "profile.pstat")
    else:
        parser.error("Neither operation chain nor operation specification file given!")
    logging.shutdown()
    # Stop logger thread in backend
    default_backend._stop_logging()
    del default_backend
if __name__ == "__main__":
    # Soft finish: sys.exit raises SystemExit carrying main()'s return value.
    sys.exit(main())
    # hard finish -- NOTE(review): unreachable, since sys.exit above always
    # raises; kept from the original as documentation of intent.
    os._exit(0)
| |
import datetime
import unittest
# 5 years from now (more or less): naive UTC timestamp; 5*365 days, so leap
# days are ignored -- hence the "more or less".
fiveyrsfuture = datetime.datetime.utcnow() + datetime.timedelta(5*365)
class Test_static_view_use_subpath_False(unittest.TestCase):
def _getTargetClass(self):
    """Return the class under test (pyramid.static.static_view)."""
    from pyramid.static import static_view
    return static_view
def _makeOne(self, *arg, **kw):
    """Instantiate the class under test with the given arguments."""
    return self._getTargetClass()(*arg, **kw)
def _makeRequest(self, kw=None):
    """Build a pyramid Request over a minimal GET WSGI environ.

    Entries in *kw*, when given, override the defaults.
    """
    from pyramid.request import Request
    environ = {
        'wsgi.url_scheme': 'http',
        'wsgi.version': (1, 0),
        'SERVER_NAME': 'example.com',
        'SERVER_PORT': '6543',
        'PATH_INFO': '/',
        'SCRIPT_NAME': '',
        'REQUEST_METHOD': 'GET',
    }
    environ.update(kw or {})
    return Request(environ=environ)
def test_ctor_defaultargs(self):
    """Constructor parses 'package:resource' and applies documented defaults."""
    inst = self._makeOne('package:resource_name')
    self.assertEqual(inst.package_name, 'package')
    self.assertEqual(inst.docroot, 'resource_name')
    self.assertEqual(inst.cache_max_age, 3600)
    self.assertEqual(inst.index, 'index.html')
def test_call_adds_slash_path_info_empty(self):
    """An empty PATH_INFO raises HTTPMovedPermanently (redirect)."""
    inst = self._makeOne('pyramid.tests:fixtures/static')
    request = self._makeRequest({'PATH_INFO':''})
    context = DummyContext()
    from pyramid.httpexceptions import HTTPMovedPermanently
    self.assertRaises(HTTPMovedPermanently, inst, context, request)
def test_path_info_slash_means_index_html(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest()
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
def test_oob_singledot(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/./index.html'})
context = DummyContext()
response = inst(context, request)
self.assertEqual(response.status, '200 OK')
self.assertTrue(b'<html>static</html>' in response.body)
def test_oob_emptyelement(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'//index.html'})
context = DummyContext()
response = inst(context, request)
self.assertEqual(response.status, '200 OK')
self.assertTrue(b'<html>static</html>' in response.body)
def test_oob_dotdotslash(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/subdir/../../minimal.pt'})
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_oob_dotdotslash_encoded(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest(
{'PATH_INFO':'/subdir/%2E%2E%2F%2E%2E/minimal.pt'})
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_oob_os_sep(self):
import os
inst = self._makeOne('pyramid.tests:fixtures/static')
dds = '..' + os.sep
request = self._makeRequest({'PATH_INFO':'/subdir/%s%sminimal.pt' %
(dds, dds)})
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_resource_doesnt_exist(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/notthere'})
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_resource_isdir(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/subdir/'})
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>subdir</html>' in response.body)
def test_resource_is_file(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/index.html'})
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
def test_resource_is_file_with_wsgi_file_wrapper(self):
from pyramid.response import _BLOCK_SIZE
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/index.html'})
class _Wrapper(object):
def __init__(self, file, block_size=None):
self.file = file
self.block_size = block_size
request.environ['wsgi.file_wrapper'] = _Wrapper
context = DummyContext()
response = inst(context, request)
app_iter = response.app_iter
self.assertTrue(isinstance(app_iter, _Wrapper))
self.assertTrue(b'<html>static</html>' in app_iter.file.read())
self.assertEqual(app_iter.block_size, _BLOCK_SIZE)
app_iter.file.close()
def test_resource_is_file_with_cache_max_age(self):
inst = self._makeOne('pyramid.tests:fixtures/static', cache_max_age=600)
request = self._makeRequest({'PATH_INFO':'/index.html'})
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
self.assertEqual(len(response.headerlist), 5)
header_names = [ x[0] for x in response.headerlist ]
header_names.sort()
self.assertEqual(header_names,
['Cache-Control', 'Content-Length', 'Content-Type',
'Expires', 'Last-Modified'])
def test_resource_is_file_with_no_cache_max_age(self):
inst = self._makeOne('pyramid.tests:fixtures/static',
cache_max_age=None)
request = self._makeRequest({'PATH_INFO':'/index.html'})
context = DummyContext()
response = inst(context, request)
self.assertTrue(b'<html>static</html>' in response.body)
self.assertEqual(len(response.headerlist), 3)
header_names = [ x[0] for x in response.headerlist ]
header_names.sort()
self.assertEqual(
header_names,
['Content-Length', 'Content-Type', 'Last-Modified'])
def test_resource_notmodified(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/index.html'})
request.if_modified_since = fiveyrsfuture
context = DummyContext()
response = inst(context, request)
start_response = DummyStartResponse()
app_iter = response(request.environ, start_response)
try:
self.assertEqual(start_response.status, '304 Not Modified')
self.assertEqual(list(app_iter), [])
finally:
app_iter.close()
def test_not_found(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/notthere.html'})
context = DummyContext()
from pyramid.httpexceptions import HTTPNotFound
self.assertRaises(HTTPNotFound, inst, context, request)
def test_resource_with_content_encoding(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/arcs.svg.tgz'})
context = DummyContext()
response = inst(context, request)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/x-tar')
self.assertEqual(response.content_encoding, 'gzip')
response.app_iter.close()
def test_resource_no_content_encoding(self):
inst = self._makeOne('pyramid.tests:fixtures/static')
request = self._makeRequest({'PATH_INFO':'/index.html'})
context = DummyContext()
response = inst(context, request)
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'text/html')
self.assertEqual(response.content_encoding, None)
response.app_iter.close()
class Test_static_view_use_subpath_True(unittest.TestCase):
    """Tests for ``pyramid.static.static_view`` with ``use_subpath=True``.

    In this mode the view resolves the asset from ``request.subpath``
    (a tuple of path segments) rather than from ``PATH_INFO``.
    """

    def _getTargetClass(self):
        # Imported lazily so test collection does not require pyramid.
        from pyramid.static import static_view
        return static_view

    def _makeOne(self, *arg, **kw):
        # Construct the view under test, forcing subpath-based resolution.
        kw['use_subpath'] = True
        return self._getTargetClass()(*arg, **kw)

    def _makeRequest(self, kw=None):
        # Build a minimal GET request; ``kw`` entries override the environ.
        from pyramid.request import Request
        environ = {
            'wsgi.url_scheme':'http',
            'wsgi.version':(1,0),
            'SERVER_NAME':'example.com',
            'SERVER_PORT':'6543',
            'PATH_INFO':'/',
            'SCRIPT_NAME':'',
            'REQUEST_METHOD':'GET',
            }
        if kw is not None:
            environ.update(kw)
        return Request(environ=environ)

    def test_ctor_defaultargs(self):
        inst = self._makeOne('package:resource_name')
        self.assertEqual(inst.package_name, 'package')
        self.assertEqual(inst.docroot, 'resource_name')
        self.assertEqual(inst.cache_max_age, 3600)
        self.assertEqual(inst.index, 'index.html')

    def test_call_adds_slash_path_info_empty(self):
        # An empty PATH_INFO should redirect to the slash-terminated URL.
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest({'PATH_INFO':''})
        request.subpath = ()
        context = DummyContext()
        from pyramid.httpexceptions import HTTPMovedPermanently
        self.assertRaises(HTTPMovedPermanently, inst, context, request)

    def test_path_info_slash_means_index_html(self):
        # An empty subpath serves the configured index document.
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ()
        context = DummyContext()
        response = inst(context, request)
        self.assertTrue(b'<html>static</html>' in response.body)

    # --- out-of-bounds / path traversal handling -------------------------
    # With use_subpath=True, suspicious segments ('.', '', '..') are not
    # normalized away; they must be rejected outright.

    def test_oob_singledot(self):
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ('.', 'index.html')
        context = DummyContext()
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, inst, context, request)

    def test_oob_emptyelement(self):
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ('', 'index.html')
        context = DummyContext()
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, inst, context, request)

    def test_oob_dotdotslash(self):
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ('subdir', '..', '..', 'minimal.pt')
        context = DummyContext()
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, inst, context, request)

    def test_oob_dotdotslash_encoded(self):
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ('subdir', '%2E%2E', '%2E%2E', 'minimal.pt')
        context = DummyContext()
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, inst, context, request)

    def test_oob_os_sep(self):
        import os
        inst = self._makeOne('pyramid.tests:fixtures/static')
        dds = '..' + os.sep
        request = self._makeRequest()
        request.subpath = ('subdir', dds, dds, 'minimal.pt')
        context = DummyContext()
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, inst, context, request)

    # --- resource resolution ---------------------------------------------

    def test_resource_doesnt_exist(self):
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        # BUG FIX: was ('notthere,') — a plain *string* (the comma was
        # inside the quotes), not a one-element tuple; the test only passed
        # by accident.  Subpath must be a tuple of segments.
        request.subpath = ('notthere',)
        context = DummyContext()
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, inst, context, request)

    def test_resource_isdir(self):
        # A directory segment serves that directory's index file.
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ('subdir',)
        context = DummyContext()
        response = inst(context, request)
        self.assertTrue(b'<html>subdir</html>' in response.body)

    def test_resource_is_file(self):
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ('index.html',)
        context = DummyContext()
        response = inst(context, request)
        self.assertTrue(b'<html>static</html>' in response.body)

    def test_resource_is_file_with_cache_max_age(self):
        # cache_max_age adds Cache-Control and Expires headers.
        inst = self._makeOne('pyramid.tests:fixtures/static', cache_max_age=600)
        request = self._makeRequest()
        request.subpath = ('index.html',)
        context = DummyContext()
        response = inst(context, request)
        self.assertTrue(b'<html>static</html>' in response.body)
        self.assertEqual(len(response.headerlist), 5)
        header_names = [ x[0] for x in response.headerlist ]
        header_names.sort()
        self.assertEqual(header_names,
                         ['Cache-Control', 'Content-Length', 'Content-Type',
                          'Expires', 'Last-Modified'])

    def test_resource_is_file_with_no_cache_max_age(self):
        # Without cache_max_age, no caching headers are emitted.
        inst = self._makeOne('pyramid.tests:fixtures/static',
                             cache_max_age=None)
        request = self._makeRequest()
        request.subpath = ('index.html',)
        context = DummyContext()
        response = inst(context, request)
        self.assertTrue(b'<html>static</html>' in response.body)
        self.assertEqual(len(response.headerlist), 3)
        header_names = [ x[0] for x in response.headerlist ]
        header_names.sort()
        self.assertEqual(
            header_names,
            ['Content-Length', 'Content-Type', 'Last-Modified'])

    def test_resource_notmodified(self):
        # A future If-Modified-Since yields a 304 with an empty body.
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.if_modified_since = fiveyrsfuture
        request.subpath = ('index.html',)
        context = DummyContext()
        response = inst(context, request)
        start_response = DummyStartResponse()
        app_iter = response(request.environ, start_response)
        try:
            self.assertEqual(start_response.status, '304 Not Modified')
            self.assertEqual(list(app_iter), [])
        finally:
            app_iter.close()

    def test_not_found(self):
        inst = self._makeOne('pyramid.tests:fixtures/static')
        request = self._makeRequest()
        request.subpath = ('notthere.html',)
        context = DummyContext()
        from pyramid.httpexceptions import HTTPNotFound
        self.assertRaises(HTTPNotFound, inst, context, request)
class DummyContext:
    """Inert stand-in for the traversal context handed to the view."""
    pass
class DummyStartResponse:
    """Callable recording the arguments of a WSGI start_response call."""

    status = ()
    headers = ()

    def __call__(self, status, headers):
        # Remember the most recent invocation on the instance.
        self.status, self.headers = status, headers
| |
# oracle/zxjdbc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Oracle database via the zxjdbc JDBC connector.
JDBC Driver
-----------
The official Oracle JDBC driver is at
http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
"""
import decimal
import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import base, default
from sqlalchemy.sql import expression
import collections
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
    """Date type that strips the time component from zxJDBC results."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # The driver hands back datetime objects; reduce to a date.
            if value is not None:
                return value.date()
            return None
        return process
class _ZxJDBCNumeric(sqltypes.Numeric):
    """Numeric type coercing zxJDBC results to Decimal or float."""

    def result_processor(self, dialect, coltype):
        # XXX: unclear whether the driver always returns Decimal; keep
        # defensive conversions on both branches.
        if self.asdecimal:
            def process(value):
                if not isinstance(value, decimal.Decimal):
                    return decimal.Decimal(str(value))
                return value
        else:
            def process(value):
                if not isinstance(value, decimal.Decimal):
                    return value
                return float(value)
        return process
class OracleCompiler_zxjdbc(OracleCompiler):
    """SQL compiler that renders RETURNING ... INTO for the zxJDBC driver."""

    def returning_clause(self, stmt, returning_cols):
        """Render the RETURNING clause and register OUT-style bind params.

        Records ``returning_cols`` / ``returning_parameters`` on the
        compiler so the execution context can read the values back from
        the driver's return result set after execution.
        """
        # Flatten any selectable-like entries into plain column expressions.
        self.returning_cols = list(expression._select_iterables(returning_cols))

        # within_columns_clause=False so that labels (foo AS bar) don't render
        columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
                   for c in self.returning_cols]

        if not hasattr(self, 'returning_parameters'):
            self.returning_parameters = []

        binds = []
        for i, col in enumerate(self.returning_cols):
            # Record the 1-based column index and DBAPI type for retrieval.
            dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
            self.returning_parameters.append((i + 1, dbtype))

            # Bind a ReturningParam placeholder; the custom data handler
            # registers it as a JDBC return parameter, not a normal bind.
            bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
            self.binds[bindparam.key] = bindparam
            binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))

        return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
    """Execution context that fetches RETURNING values via zxJDBC.

    NOTE: this module is Jython/Python-2 only (``except E, name`` syntax).
    """

    def pre_exec(self):
        if hasattr(self.compiled, 'returning_parameters'):
            # prepare a zxJDBC statement so we can grab its underlying
            # OraclePreparedStatement's getReturnResultSet later
            self.statement = self.cursor.prepare(self.statement)

    def get_result_proxy(self):
        # When the compiler registered RETURNING parameters, read them
        # from the driver's dedicated return result set.
        if hasattr(self.compiled, 'returning_parameters'):
            rrs = None
            try:
                try:
                    rrs = self.statement.__statement__.getReturnResultSet()
                    rrs.next()
                except SQLException, sqle:
                    # Translate the Java SQLException into a DBAPI error.
                    msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
                    if sqle.getSQLState() is not None:
                        msg += ' [SQLState: %s]' % sqle.getSQLState()
                    raise zxJDBC.Error(msg)
                else:
                    # Convert each registered column through the data handler.
                    row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
                                for index, dbtype in self.compiled.returning_parameters)
                    return ReturningResultProxy(self, row)
            finally:
                # Always release the JDBC result set and prepared statement.
                if rrs is not None:
                    try:
                        rrs.close()
                    except SQLException:
                        pass
                self.statement.close()

        return base.ResultProxy(self)

    def create_cursor(self):
        # Wrap the default datahandler so ReturningParam binds are
        # special-cased (see OracleReturningDataHandler).
        cursor = self._dbapi_connection.cursor()
        cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
        return cursor
class ReturningResultProxy(base.FullyBufferedResultProxy):
    """Fully-buffered result proxy over a single RETURNING row."""

    def __init__(self, context, returning_row):
        self._returning_row = returning_row
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        # Synthesize a DBAPI-style description from the compiled columns;
        # anonymous expressions fall back to their generated label.
        return [
            (c.name, c.type) if hasattr(c, 'name') else (c.anon_label, c.type)
            for c in self.context.compiled.returning_cols
        ]

    def _buffer_rows(self):
        # The single RETURNING row is the entire buffered result.
        return collections.deque((self._returning_row,))
class ReturningParam(object):
    """A bindparam value standing in for a RETURNING parameter.

    Recognized and special-cased by OracleReturningDataHandler.
    """

    def __init__(self, type):
        self.type = type

    def __eq__(self, other):
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type == other.type

    def __ne__(self, other):
        if not isinstance(other, ReturningParam):
            return NotImplemented
        return self.type != other.type

    def __repr__(self):
        kls = self.__class__
        return '<%s.%s object at 0x%x type=%s>' % (
            kls.__module__, kls.__name__, id(self), self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
    """Oracle dialect for Jython's zxJDBC over the Oracle thin JDBC driver."""

    jdbc_db_name = 'oracle'
    jdbc_driver_name = 'oracle.jdbc.OracleDriver'

    statement_compiler = OracleCompiler_zxjdbc
    execution_ctx_cls = OracleExecutionContext_zxjdbc

    # Override Date/Numeric with zxJDBC-specific result processors.
    colspecs = util.update_copy(
        OracleDialect.colspecs,
        {
            sqltypes.Date : _ZxJDBCDate,
            sqltypes.Numeric: _ZxJDBCNumeric
        }
    )

    def __init__(self, *args, **kwargs):
        super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
        # Bind the module-level placeholders to the real Java classes;
        # these imports only resolve when running under Jython.
        global SQLException, zxJDBC
        from java.sql import SQLException
        from com.ziclix.python.sql import zxJDBC
        from com.ziclix.python.sql.handler import OracleDataHandler

        class OracleReturningDataHandler(OracleDataHandler):
            """zxJDBC DataHandler that specially handles ReturningParam."""

            def setJDBCObject(self, statement, index, object, dbtype=None):
                if type(object) is ReturningParam:
                    # Register as a JDBC return parameter rather than a bind.
                    statement.registerReturnParameter(index, object.type)
                elif dbtype is None:
                    OracleDataHandler.setJDBCObject(self, statement, index, object)
                else:
                    OracleDataHandler.setJDBCObject(self, statement, index, object, dbtype)

        self.DataHandler = OracleReturningDataHandler

    def initialize(self, connection):
        super(OracleDialect_zxjdbc, self).initialize(connection)
        # RETURNING support requires JDBC driver 10.2+.
        # NOTE(review): this is a lexicographic *string* comparison, so e.g.
        # a "9.x" driverversion also compares >= '10.2' — confirm intended.
        self.implicit_returning = connection.connection.driverversion >= '10.2'

    def _create_jdbc_url(self, url):
        # jdbc:oracle:thin:@host:port:sid (port defaults to 1521).
        return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)

    def _get_server_version_info(self, connection):
        # Parse e.g. "... Release 10.2.0.4.0 ..." into (10, 2, 0, 4, 0).
        version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
        return tuple(int(x) for x in version.split('.'))

# The dialect entry point looked up by SQLAlchemy's dialect registry.
dialect = OracleDialect_zxjdbc
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _evaluate(tensor):
  """Returns the numpy value of a tensor."""
  if not context.executing_eagerly():
    # Graph mode: pull the value through the default session.
    return ops.get_default_session().run(tensor)
  return tensor.numpy()
def _GetCheckpointFilename(save_dir, latest_filename):
  """Returns a filename for storing the CheckpointState.

  Args:
    save_dir: The directory for saving and restoring checkpoints.
    latest_filename: Name of the file in 'save_dir' that is used
      to store the CheckpointState.

  Returns:
    The path of the file that contains the CheckpointState proto.
  """
  # Explicit None check (not `or`) so an empty string is respected.
  filename = "checkpoint" if latest_filename is None else latest_filename
  return os.path.join(save_dir, filename)
@tf_export(v1=["train.generate_checkpoint_state_proto"])
def generate_checkpoint_state_proto(save_dir,
                                    model_checkpoint_path,
                                    all_model_checkpoint_paths=None,
                                    all_model_checkpoint_timestamps=None,
                                    last_preserved_timestamp=None):
  """Generates a checkpoint state proto.

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.
    all_model_checkpoint_timestamps: A list of floats, indicating the number of
      seconds since the Epoch when each checkpoint was generated.
    last_preserved_timestamp: A float, indicating the number of seconds since
      the Epoch when the last preserved checkpoint was written, e.g. due to a
      `keep_checkpoint_every_n_hours` parameter (see
      `tf.train.CheckpointManager` for an implementation).

  Returns:
    CheckpointState proto with model_checkpoint_path and
    all_model_checkpoint_paths updated to either absolute paths or
    relative paths to the current save_dir.

  Raises:
    ValueError: If `all_model_checkpoint_timestamps` was provided but its length
      does not match `all_model_checkpoint_paths`.
  """
  if all_model_checkpoint_paths is None:
    all_model_checkpoint_paths = []

  # Ensure the current checkpoint is always the last (newest) entry.
  if (not all_model_checkpoint_paths or
      all_model_checkpoint_paths[-1] != model_checkpoint_path):
    logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
                 model_checkpoint_path)
    # NOTE(review): this appends to — and the loop below rewrites entries
    # of — the caller-supplied list in place; callers observe the mutation.
    all_model_checkpoint_paths.append(model_checkpoint_path)

  if (all_model_checkpoint_timestamps
      and (len(all_model_checkpoint_timestamps)
           != len(all_model_checkpoint_paths))):
    raise ValueError(
        ("Checkpoint timestamps, if provided, must match checkpoint paths (got "
         "paths %s and timestamps %s)")
        % (all_model_checkpoint_paths, all_model_checkpoint_timestamps))

  # Relative paths need to be rewritten to be relative to the "save_dir"
  # if model_checkpoint_path already contains "save_dir".
  if not os.path.isabs(save_dir):
    if not os.path.isabs(model_checkpoint_path):
      model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
    for i, p in enumerate(all_model_checkpoint_paths):
      if not os.path.isabs(p):
        all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)

  coord_checkpoint_proto = CheckpointState(
      model_checkpoint_path=model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths,
      all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
      last_preserved_timestamp=last_preserved_timestamp)

  return coord_checkpoint_proto
@deprecation.deprecated(
    date=None,
    instructions=("Use `tf.train.CheckpointManager` to manage checkpoints "
                  "rather than manually editing the Checkpoint proto."))
@tf_export(v1=["train.update_checkpoint_state"])
def update_checkpoint_state(save_dir,
                            model_checkpoint_path,
                            all_model_checkpoint_paths=None,
                            latest_filename=None,
                            all_model_checkpoint_timestamps=None,
                            last_preserved_timestamp=None):
  """Updates the content of the 'checkpoint' file.

  Thin public wrapper around `update_checkpoint_state_internal` that always
  writes absolute paths to the CheckpointState proto.

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.
    all_model_checkpoint_timestamps: Optional list of timestamps (floats,
      seconds since the Epoch) indicating when the checkpoints in
      `all_model_checkpoint_paths` were created.
    last_preserved_timestamp: A float, indicating the number of seconds since
      the Epoch when the last preserved checkpoint was written, e.g. due to a
      `keep_checkpoint_every_n_hours` parameter (see
      `tf.train.CheckpointManager` for an implementation).

  Raises:
    RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointSate.
  """
  state_kwargs = dict(
      save_dir=save_dir,
      model_checkpoint_path=model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths,
      latest_filename=latest_filename,
      # The public API always records absolute paths.
      save_relative_paths=False,
      all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
      last_preserved_timestamp=last_preserved_timestamp)
  update_checkpoint_state_internal(**state_kwargs)
def update_checkpoint_state_internal(save_dir,
                                     model_checkpoint_path,
                                     all_model_checkpoint_paths=None,
                                     latest_filename=None,
                                     save_relative_paths=False,
                                     all_model_checkpoint_timestamps=None,
                                     last_preserved_timestamp=None):
  """Updates the content of the 'checkpoint' file.

  Rewrites the CheckpointState proto stored in the coordinator's
  "checkpoint" file, optionally converting all stored paths to be
  relative to `save_dir`.

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.
    save_relative_paths: If `True`, will write relative paths to the checkpoint
      state file.
    all_model_checkpoint_timestamps: Optional list of timestamps (floats,
      seconds since the Epoch) indicating when the checkpoints in
      `all_model_checkpoint_paths` were created.
    last_preserved_timestamp: A float, indicating the number of seconds since
      the Epoch when the last preserved checkpoint was written, e.g. due to a
      `keep_checkpoint_every_n_hours` parameter (see
      `tf.train.CheckpointManager` for an implementation).

  Raises:
    RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointSate.
  """
  # Writes the "checkpoint" file for the coordinator for later restoration.
  coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
  if save_relative_paths:
    def _relativize(path):
      # Absolute paths become relative to save_dir; others pass through.
      return os.path.relpath(path, save_dir) if os.path.isabs(path) else path
    ckpt = generate_checkpoint_state_proto(
        save_dir,
        _relativize(model_checkpoint_path),
        all_model_checkpoint_paths=[_relativize(p)
                                    for p in all_model_checkpoint_paths],
        all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
        last_preserved_timestamp=last_preserved_timestamp)
  else:
    ckpt = generate_checkpoint_state_proto(
        save_dir,
        model_checkpoint_path,
        all_model_checkpoint_paths=all_model_checkpoint_paths,
        all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
        last_preserved_timestamp=last_preserved_timestamp)

  if coord_checkpoint_filename == ckpt.model_checkpoint_path:
    raise RuntimeError("Save path '%s' conflicts with path used for "
                       "checkpoint state.  Please use a different save path." %
                       model_checkpoint_path)

  # Preventing potential read/write race condition by *atomically* writing to a
  # file.
  file_io.atomic_write_string_to_file(coord_checkpoint_filename,
                                      text_format.MessageToString(ckpt))
@tf_export("train.get_checkpoint_state")
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
  """Returns CheckpointState proto from the "checkpoint" file.

  If the "checkpoint" file contains a valid CheckpointState
  proto, returns it.

  Args:
    checkpoint_dir: The directory of checkpoints.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.

  Returns:
    A CheckpointState if the state was available, None
    otherwise.

  Raises:
    ValueError: if the checkpoint read doesn't have model_checkpoint_path set.
  """
  # Fix: removed the vestigial `f = None` / `finally: if f: f.close()` —
  # no file handle is ever opened here (all reads go through file_io by
  # path), so that cleanup code was dead.
  ckpt = None
  coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
                                                     latest_filename)
  try:
    # Check that the file exists before opening it to avoid
    # many lines of errors from colossus in the logs.
    if file_io.file_exists(coord_checkpoint_filename):
      file_content = file_io.read_file_to_string(
          coord_checkpoint_filename)
      ckpt = CheckpointState()
      text_format.Merge(file_content, ckpt)
      if not ckpt.model_checkpoint_path:
        raise ValueError("Invalid checkpoint state loaded from "
                         + checkpoint_dir)
      # For relative model_checkpoint_path and all_model_checkpoint_paths,
      # prepend checkpoint_dir.
      if not os.path.isabs(ckpt.model_checkpoint_path):
        ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
                                                  ckpt.model_checkpoint_path)
      for i, p in enumerate(ckpt.all_model_checkpoint_paths):
        if not os.path.isabs(p):
          ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
  except errors.OpError as e:
    # It's ok if the file cannot be read
    logging.warning("%s: %s", type(e).__name__, e)
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  except text_format.ParseError as e:
    logging.warning("%s: %s", type(e).__name__, e)
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  return ckpt
def _prefix_to_checkpoint_path(prefix, format_version):
  """Returns the pathname of a checkpoint file, given the checkpoint prefix.

  For V1 checkpoint, simply returns the prefix itself (the data file).  For
  V2, returns the pathname to the index file.

  Args:
    prefix: a string, the prefix of a checkpoint.
    format_version: the checkpoint format version that corresponds to the
      prefix.

  Returns:
    The pathname of a checkpoint file, taking into account the checkpoint
    format version.
  """
  if format_version != saver_pb2.SaverDef.V2:
    return prefix  # V1: the prefix is itself the data file.
  return prefix + ".index"  # V2: the index file identifies a checkpoint.
@tf_export("train.latest_checkpoint")
def latest_checkpoint(checkpoint_dir, latest_filename=None):
  """Finds the filename of latest saved checkpoint file.

  Gets the checkpoint state given the provided checkpoint_dir and looks for a
  corresponding TensorFlow 2 (preferred) or TensorFlow 1.x checkpoint path.
  The latest_filename argument is only applicable if you are saving checkpoint
  using `v1.train.Saver.save`

  See the [Training Checkpoints
  Guide](https://www.tensorflow.org/guide/checkpoint) for more details and
  examples.`

  Args:
    checkpoint_dir: Directory where the variables were saved.
    latest_filename: Optional name for the protocol buffer file that
      contains the list of most recent checkpoint filenames.
      See the corresponding argument to `v1.train.Saver.save`.

  Returns:
    The full path to the latest checkpoint or `None` if no checkpoint was found.
  """
  # Pick the latest checkpoint based on checkpoint state.
  ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
  if not (ckpt and ckpt.model_checkpoint_path):
    return None
  # Probe V2 first, then V1 — first format whose files exist wins.
  candidate_paths = [
      _prefix_to_checkpoint_path(ckpt.model_checkpoint_path, version)
      for version in (saver_pb2.SaverDef.V2, saver_pb2.SaverDef.V1)
  ]
  if any(file_io.get_matching_files(p) for p in candidate_paths):
    return ckpt.model_checkpoint_path
  logging.error("Couldn't match files for checkpoint %s",
                ckpt.model_checkpoint_path)
  return None
def checkpoint_exists_internal(checkpoint_prefix):
  """Checks whether a V1 or V2 checkpoint exists with the specified prefix.

  This is an internal function to check if a checkpoint exists,
  since it takes into account the naming difference between V1 and V2 formats.

  Args:
    checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
      priority.  Typically the result of `Saver.save()` or that of
      `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
      V1/V2.

  Returns:
    A bool, true if a checkpoint referred to by `checkpoint_prefix` exists.
  """
  v2_pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
                                           saver_pb2.SaverDef.V2)
  # Try the V2 index file first; fall back to the bare V1 prefix.
  return bool(file_io.get_matching_files(v2_pathname) or
              file_io.get_matching_files(checkpoint_prefix))
@deprecation.deprecated(
    date=None,
    instructions="Use standard file APIs to check for files with this prefix.")
@tf_export(v1=["train.checkpoint_exists"])
def checkpoint_exists(checkpoint_prefix):
  """Checks whether a V1 or V2 checkpoint exists with the specified prefix.

  Deprecated public wrapper around `checkpoint_exists_internal`, which
  accounts for the naming difference between V1 and V2 formats.

  Args:
    checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
      priority.  Typically the result of `Saver.save()` or that of
      `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
      V1/V2.

  Returns:
    A bool, true if a checkpoint referred to by `checkpoint_prefix` exists.
  """
  exists = checkpoint_exists_internal(checkpoint_prefix)
  return exists
@deprecation.deprecated(
    date=None,
    instructions="Use standard file utilities to get mtimes.")
@tf_export(v1=["train.get_checkpoint_mtimes"])
def get_checkpoint_mtimes(checkpoint_prefixes):
  """Returns the mtimes (modification timestamps) of the checkpoints.

  Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files
  exist, collect their mtime. Both V2 and V1 checkpoints are considered, in
  that priority.

  This is the recommended way to get the mtimes, since it takes into account
  the naming difference between V1 and V2 formats.

  Note: If not all checkpoints exist, the length of the returned mtimes list
  will be smaller than the length of `checkpoint_prefixes` list, so mapping
  checkpoints to corresponding mtimes will not be possible.

  Args:
    checkpoint_prefixes: a list of checkpoint paths, typically the results of
      `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of
      sharded/non-sharded or V1/V2.

  Returns:
    A list of mtimes (in seconds since the epoch) of the found checkpoints.
  """
  mtimes = []

  def match_maybe_append(pathname):
    # Append the mtime of the first matching file, if any. mtime_nsec is
    # nanoseconds, so dividing by 1e9 yields seconds (not microseconds as the
    # docstring previously claimed).
    fnames = file_io.get_matching_files(pathname)
    if fnames:
      mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)
      return True
    return False

  for checkpoint_prefix in checkpoint_prefixes:
    # Tries V2's metadata file first.
    pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
                                          saver_pb2.SaverDef.V2)
    if match_maybe_append(pathname):
      continue
    # Otherwise, tries V1, where the prefix is the complete pathname.
    match_maybe_append(checkpoint_prefix)

  return mtimes
@deprecation.deprecated(
    date=None,
    instructions="Use standard file APIs to delete files with this prefix.")
@tf_export(v1=["train.remove_checkpoint"])
def remove_checkpoint(checkpoint_prefix,
                      checkpoint_format_version=saver_pb2.SaverDef.V2,
                      meta_graph_suffix="meta"):
  """Removes a checkpoint given by `checkpoint_prefix`.

  Args:
    checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the
      result of `Saver.save()` or that of `tf.train.latest_checkpoint()`,
      regardless of sharded/non-sharded or V1/V2.
    checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to
      `SaverDef.V2`.
    meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
  """
  # The MetaGraphDef file exists for both formats; remove it first.
  meta_graph_path = meta_graph_filename(checkpoint_prefix, meta_graph_suffix)
  _delete_file_if_exists(meta_graph_path)

  if checkpoint_format_version != saver_pb2.SaverDef.V2:
    # V1, Legacy. Exact match on the data file.
    _delete_file_if_exists(checkpoint_prefix)
  else:
    # V2 has a metadata file and some data files.
    _delete_file_if_exists(checkpoint_prefix + ".index")
    _delete_file_if_exists(checkpoint_prefix + ".data-?????-of-?????")
def _delete_file_if_exists(filespec):
  """Deletes every file matching the glob pattern `filespec`, if any."""
  matching_paths = file_io.get_matching_files(filespec)
  for path in matching_paths:
    file_io.delete_file(path)
def meta_graph_filename(checkpoint_filename, meta_graph_suffix="meta"):
  """Returns the meta graph filename for a checkpoint.

  A sharded checkpoint filename may look like "model.ckpt-step#-?????-of-shard#"
  (e.g. "model.ckpt-123456-?????-of-00005" or
  "model.ckpt-123456-00001-of-00002"); the shard suffix is stripped before the
  meta graph suffix is appended.

  Args:
    checkpoint_filename: Name of the checkpoint file.
    meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.

  Returns:
    MetaGraph file name, e.g. "model.ckpt-123456.meta".
  """
  stripped_basename = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
  return "%s.%s" % (stripped_basename, meta_graph_suffix)
# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?
@tf_export("train.CheckpointManager")
class CheckpointManager(object):
  """Manages multiple checkpoints by keeping some and deleting unneeded ones.

  Example usage:

  ```python
  import tensorflow as tf
  checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
  manager = tf.train.CheckpointManager(
      checkpoint, directory="/tmp/model", max_to_keep=5)
  status = checkpoint.restore(manager.latest_checkpoint)
  while True:
    # train
    manager.save()
  ```

  `CheckpointManager` preserves its own state across instantiations (see the
  `__init__` documentation for details). Only one should be active in a
  particular directory at a time.
  """

  def __init__(self,
               checkpoint,
               directory,
               max_to_keep,
               keep_checkpoint_every_n_hours=None,
               checkpoint_name="ckpt",
               step_counter=None,
               checkpoint_interval=None,
               init_fn=None):
    """Configure a `CheckpointManager` for use in `directory`.

    If a `CheckpointManager` was previously used in `directory`, its
    state will be restored. This includes the list of managed checkpoints and
    the timestamp bookkeeping necessary to support
    `keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`
    will be the same as the previous `CheckpointManager`, including cleaning up
    existing checkpoints if appropriate.

    Checkpoints are only considered for deletion just after a new checkpoint has
    been added. At that point, `max_to_keep` checkpoints will remain in an
    "active set". Once a checkpoint is preserved by
    `keep_checkpoint_every_n_hours` it will not be deleted by this
    `CheckpointManager` or any future `CheckpointManager` instantiated in
    `directory` (regardless of the new setting of
    `keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the
    active set may be deleted by this `CheckpointManager` or a future
    `CheckpointManager` instantiated in `directory` (subject to its
    `max_to_keep` and `keep_checkpoint_every_n_hours` settings).

    `CheckpointManager` can be also used for initializing the model if
    there is no checkpoints for restoring in `directory`. An example usage is:

    >>> import tempfile
    >>> tmp_dir = tempfile.mkdtemp()
    >>> checkpoint = tf.train.Checkpoint()
    >>> init_path = checkpoint.save(os.path.join(tmp_dir, 'init'))
    >>> def init_fn():
    ...   # Partially restore the checkpoint from `init_path`.
    ...   checkpoint.restore(init_path)
    >>> manager = tf.train.CheckpointManager(
    ...     checkpoint,
    ...     directory=os.path.join(tmp_dir, 'ckpt'),
    ...     max_to_keep=None,
    ...     init_fn=init_fn)
    >>> # `restore_or_initialize` will call `init_fn` if there is no existing
    >>> # checkpoint in `directory`.
    >>> manager.restore_or_initialize()

    Args:
      checkpoint: The `tf.train.Checkpoint` instance to save and manage
        checkpoints for.
      directory: The path to a directory in which to write checkpoints. A
        special file named "checkpoint" is also written to this directory (in a
        human-readable text format) which contains the state of the
        `CheckpointManager`.
      max_to_keep: An integer, the number of checkpoints to keep. Unless
        preserved by `keep_checkpoint_every_n_hours`, checkpoints will be
        deleted from the active set, oldest first, until only `max_to_keep`
        checkpoints remain. If `None`, no checkpoints are deleted and everything
        stays in the active set. Note that `max_to_keep=None` will keep all
        checkpoint paths in memory and in the checkpoint state protocol buffer
        on disk.
      keep_checkpoint_every_n_hours: Upon removal from the active set, a
        checkpoint will be preserved if it has been at least
        `keep_checkpoint_every_n_hours` since the last preserved checkpoint. The
        default setting of `None` does not preserve any checkpoints in this way.
      checkpoint_name: Custom name for the checkpoint file.
      step_counter: A `tf.Variable` instance for checking the current step
        counter value, in case users want to save checkpoints every N steps.
      checkpoint_interval: An integer, indicates the minimum step interval
        between two checkpoints.
      init_fn: Callable. A function to do customized initialization if no
        checkpoints are in the directory.

    Raises:
      ValueError: If `max_to_keep` is not a positive integer.
    """
    self._checkpoint = checkpoint
    # Graph-mode only: cached `assign_add` op for the save counter, built once.
    self._save_counter_assign = None
    if max_to_keep is not None and max_to_keep <= 0:
      raise ValueError(
          ("Expected a positive integer or `None` for `max_to_keep`, "
           "got %d.")
          % (max_to_keep,))
    self._max_to_keep = max_to_keep
    self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
    self._directory = directory
    self._checkpoint_prefix = os.path.join(directory, checkpoint_name)
    self._init_fn = init_fn
    if checkpoint_interval is not None:
      if step_counter is None:
        raise ValueError("`step_counter` should be passed if "
                         "`checkpoint_interval` is not None.")
      self._last_checkpoint_step = None
      self._step_counter = step_counter
    self._checkpoint_interval = checkpoint_interval
    # Recover any state a previous CheckpointManager left in `directory`.
    recovered_state = get_checkpoint_state(directory)
    current_clock = time.time()
    # Checkpoints eligible for deletion, oldest first (insertion order).
    self._maybe_delete = collections.OrderedDict()
    if recovered_state is None:
      self._latest_checkpoint = None
      # Set the clock back slightly to avoid race conditions when quickly
      # re-creating a CheckpointManager.
      self._last_preserved_timestamp = current_clock - 1.
    else:
      self._latest_checkpoint = recovered_state.model_checkpoint_path
      self._last_preserved_timestamp = recovered_state.last_preserved_timestamp
      if current_clock < self._last_preserved_timestamp:
        # Time seems to have reversed itself. In addition to this warning, we'll
        # min() saved checkpoint timestamps with the current time to ensure that
        # old checkpoints don't get deleted accidentally.
        logging.warning(
            ("time.time() returned a value %f seconds behind the last "
             "preserved checkpoint timestamp.")
            % (self._last_preserved_timestamp - current_clock,))
        self._last_preserved_timestamp = current_clock
      all_timestamps = recovered_state.all_model_checkpoint_timestamps
      all_paths = recovered_state.all_model_checkpoint_paths
      del recovered_state  # Uses modified values from now on
      if not all_timestamps:
        # Older checkpoint states lack timestamps; assume everything was
        # written at the last preserved time so nothing is deleted early.
        all_timestamps = [self._last_preserved_timestamp] * len(all_paths)
      for filename, timestamp in zip(all_paths, all_timestamps):
        timestamp = min(timestamp, current_clock)
        if timestamp > self._last_preserved_timestamp:
          self._maybe_delete[filename] = timestamp

  @property
  def directory(self):
    """The directory this manager writes checkpoints to."""
    return self._directory

  @property
  def checkpoint_interval(self):
    """The minimum step interval between checkpoints (or `None`)."""
    return self._checkpoint_interval

  @property
  def latest_checkpoint(self):
    """The prefix of the most recent checkpoint in `directory`.

    Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is
    the constructor argument to `CheckpointManager`.

    Suitable for passing to `tf.train.Checkpoint.restore` to resume training.

    Returns:
      The checkpoint prefix. If there are no checkpoints, returns `None`.
    """
    return self._latest_checkpoint

  @property
  def checkpoints(self):
    """A list of managed checkpoints.

    Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not
    show up in this list (to avoid ever-growing filename lists).

    Returns:
      A list of filenames, sorted from oldest to newest.
    """
    return list(self._maybe_delete.keys())

  def _sweep(self):
    """Deletes or preserves managed checkpoints."""
    if not self._max_to_keep:
      # Does not update self._last_preserved_timestamp, since everything is kept
      # in the active set.
      return
    while len(self._maybe_delete) > self._max_to_keep:
      filename, timestamp = self._maybe_delete.popitem(last=False)
      # Even if we're keeping this checkpoint due to
      # keep_checkpoint_every_n_hours, we won't reference it to avoid
      # infinitely-growing CheckpointState protos.
      if (self._keep_checkpoint_every_n_hours
          and (timestamp - self._keep_checkpoint_every_n_hours * 3600.
               >= self._last_preserved_timestamp)):
        self._last_preserved_timestamp = timestamp
        continue
      # Delete both pieces of a V2 checkpoint (index + sharded data files).
      _delete_file_if_exists(filename + ".index")
      _delete_file_if_exists(filename + ".data-?????-of-?????")

  def _record_state(self):
    """Saves the `CheckpointManager`'s state in `directory`."""
    filenames, timestamps = zip(*self._maybe_delete.items())
    update_checkpoint_state_internal(
        self._directory,
        model_checkpoint_path=self.latest_checkpoint,
        all_model_checkpoint_paths=filenames,
        all_model_checkpoint_timestamps=timestamps,
        last_preserved_timestamp=self._last_preserved_timestamp,
        save_relative_paths=True)

  @property
  def _prefix(self):
    """A common prefix for all checkpoints saved with this manager.

    For example, if `directory` (a constructor argument) were `"/tmp/tf-model"`,
    `prefix` would be `"/tmp/tf-model/ckpt"` and checkpoints would generally be
    numbered `"/tmp/tf-model/ckpt-1"`, `"/tmp/tf-model/ckpt-2"`, and so on. Each
    checkpoint has several associated files
    (e.g. `"/tmp/tf-model/ckpt-2.index"`).

    Returns:
      A string prefix.
    """
    return self._checkpoint_prefix

  @property
  def checkpoint(self):
    """Returns the `tf.train.Checkpoint` object."""
    return self._checkpoint

  def save(self, checkpoint_number=None, check_interval=True):
    """Creates a new checkpoint and manages it.

    Args:
      checkpoint_number: An optional integer, or an integer-dtype `Variable` or
        `Tensor`, used to number the checkpoint. If `None` (default),
        checkpoints are numbered using `checkpoint.save_counter`. Even if
        `checkpoint_number` is provided, `save_counter` is still incremented. A
        user-provided `checkpoint_number` is not incremented even if it is a
        `Variable`.
      check_interval: An optional boolean. The argument is only effective when
        `checkpoint_interval` is passed into the manager. If `True`, the manager
        will only save the checkpoint if the interval between checkpoints is
        larger than `checkpoint_interval`. Otherwise it will always save the
        checkpoint unless a checkpoint has already been saved for the current
        step.

    Returns:
      The path to the new checkpoint. It is also recorded in the `checkpoints`
      and `latest_checkpoint` properties. `None` if no checkpoint is saved.
    """
    if self._checkpoint_interval is not None:
      current_step = _evaluate(self._step_counter)
      if self._last_checkpoint_step is not None:
        if current_step == self._last_checkpoint_step:
          # Already saved at this step; never save twice for the same step.
          return None
        if check_interval and current_step < (
            self._last_checkpoint_step + self._checkpoint_interval):
          return None
      self._last_checkpoint_step = current_step
    # Save counter logic duplicated from tf.train.Checkpoint, soon to diverge
    # slightly with a custom numbering option.
    if context.executing_eagerly():
      save_counter = self._checkpoint.save_counter
      save_counter.assign_add(1)
      session = None
    else:
      session = ops.get_default_session()

      def _initializing_creator(next_creator, **kwargs):
        """Initialize the save counter if it has been newly created."""
        v = next_creator(**kwargs)
        session.run(v.initializer)
        return v

      with variable_scope.variable_creator_scope(_initializing_creator):
        save_counter = self._checkpoint.save_counter
      if self._save_counter_assign is None:
        self._save_counter_assign = save_counter.assign_add(1, read_value=False)
      session.run(self._save_counter_assign)
    if checkpoint_number is None:
      checkpoint_number = save_counter
      if not isinstance(checkpoint_number, compat.integral_types):
        checkpoint_number = training_util.global_step(
            sess=session, global_step_tensor=checkpoint_number)
    prefix = "%s-%d" % (self._prefix, checkpoint_number)
    save_path = self._checkpoint.write(prefix)
    timestamp = time.time()
    # If this is an overwritten checkpoint we were previously tracking, delete
    # and reinsert it to make sure it goes to the end of the queue.
    if save_path in self._maybe_delete:
      del self._maybe_delete[save_path]
    self._maybe_delete[save_path] = timestamp
    self._latest_checkpoint = save_path
    # Before deleting anything we update the Checkpoint proto with the new
    # checkpoint. We'll go back and correct it after cleaning up old files, but
    # a preemption while deleting will be more likely to see the new checkpoint
    # this way.
    self._record_state()
    self._sweep()
    # Write out the Checkpoint proto a second time, now without the deleted
    # checkpoints.
    self._record_state()
    return save_path

  def restore_or_initialize(self):
    """Restore items in `checkpoint` from the latest checkpoint file.

    This method will first try to restore from the most recent checkpoint in
    `directory`. If no checkpoints exist in `directory`, and `init_fn` is
    specified, this method will call `init_fn` to do customized
    initialization. This can be used to support initialization from pretrained
    models.

    Note that unlike `tf.train.Checkpoint.restore()`, this method doesn't return
    a load status object that users can run assertions on
    (e.g. assert_consumed()). Thus to run assertions, users should directly use
    `tf.train.Checkpoint.restore()` method.

    Returns:
      The restored checkpoint path if the lastest checkpoint is found and
      restored. Otherwise None.
    """
    if self._latest_checkpoint is not None:
      self._checkpoint.restore(self._latest_checkpoint)
      if self._checkpoint_interval is not None:
        self._last_checkpoint_step = _evaluate(self._step_counter)
      return self._latest_checkpoint
    if self._init_fn is not None:
      self._init_fn()
    return None
| |
#!/usr/bin/env python
"""
Created by: Lee Bergstrand (2017)
Description: A parser for genome properties flat files, producing a rooted DAG of genome properties.
"""
from pygenprop.step import Step
from pygenprop.evidence import Evidence
from pygenprop.genome_property import GenomeProperty
from pygenprop.functional_element import FunctionalElement
from pygenprop.database_reference import DatabaseReference
from pygenprop.tree import GenomePropertiesTree
from pygenprop.literature_reference import LiteratureReference
from itertools import groupby
def parse_genome_properties_flat_file(genome_property_file):
    """
    Parses a genome properties flat file into a tree of genome properties.

    :param genome_property_file: A genome property file handle object.
    :return: A GenomePropertyTree object.
    """
    parsed_properties = []
    record_rows = []
    for file_line in genome_property_file:
        if file_line.strip() == '//':
            # '//' terminates a record: unwrap it, parse it, and start fresh.
            unwrapped_record = unwrap_genome_property_record(record_rows)
            parsed_properties.append(parse_genome_property(unwrapped_record))
            record_rows = []
        else:
            record_rows.append(create_marker_and_content(file_line))
    return GenomePropertiesTree(*parsed_properties)
def create_marker_and_content(genome_property_flat_file_line):
    """
    Splits a genome property flat file line into a marker, content pair.

    :param genome_property_flat_file_line: A line from a genome property flat file.
    :return: A tuple containing a marker, content pair.
    """
    # Fields are separated by runs of two spaces; the first field is the marker.
    marker_field, *content_fields = genome_property_flat_file_line.split('  ')
    return marker_field.strip(), ''.join(content_fields).rstrip()
def unwrap_genome_property_record(genome_property_record):
    """
    Unwraps the 80-column line wrapping of a standard genome property record.

    Consecutive rows sharing a marker (often 'CC' and '**') are merged into a
    single tuple whose contents are joined with spaces. 'EV' and 'RQ' rows are
    never merged, as each carries independent information.

    :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.
    :return: A list of reduced redundancy marker, content tuples.
    """
    unwrapped_record = []
    kept_separate = ('EV', 'RQ')
    for marker, grouped_rows in groupby(genome_property_record,
                                        key=lambda row: row[0]):
        contents = [row_content for _, row_content in grouped_rows]
        if marker in kept_separate:
            unwrapped_record.extend((marker, content) for content in contents)
        else:
            unwrapped_record.append((marker, ' '.join(contents)))
    return unwrapped_record
def parse_genome_property(genome_property_record):
    """
    Parses a single genome property from a genome property record.

    :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.
    :return: A single genome property object.
    """
    # A list of record markers related to the genome property.
    core_genome_property_markers = ('AC', 'DE', 'TP', 'TH', 'PN', 'CC', '**')
    gathered_core_genome_property_markers = {}

    # Use None (not False) as the "unset" sentinel so that a section starting
    # at row index 0 cannot be mistaken for a missing section (0 is falsy).
    reference_index = None
    database_index = None
    step_index = None

    current_index = 0
    for marker, content in genome_property_record:
        if marker == 'RN':
            if reference_index is None:
                reference_index = current_index
        elif marker == 'DC':
            if database_index is None:
                database_index = current_index
        elif marker == '--':
            # Steps begin after the '--' divider; all core markers precede it,
            # so we can stop scanning here.
            step_index = current_index + 1
            break
        elif marker in core_genome_property_markers:
            if marker == 'TH':
                # Thresholds are stored as integers.
                content = int(content)
            gathered_core_genome_property_markers[marker] = content
        current_index += 1

    if reference_index is not None:
        # Slicing with database_index=None naturally runs to the end of the
        # record when no database section follows the references.
        reference_rows = genome_property_record[reference_index:database_index]
        references = parse_literature_references(reference_rows)
    else:
        references = []

    if database_index is not None:
        if step_index is not None:
            # Stop before the '--' divider row itself.
            database_rows = genome_property_record[database_index:step_index - 1]
        else:
            database_rows = genome_property_record[database_index:]
        databases = parse_database_references(database_rows)
    else:
        databases = []

    if step_index is not None:
        steps = parse_steps(genome_property_record[step_index:])
    else:
        steps = []

    new_genome_property = GenomeProperty(accession_id=gathered_core_genome_property_markers.get('AC'),
                                         name=gathered_core_genome_property_markers.get('DE'),
                                         property_type=gathered_core_genome_property_markers.get('TP'),
                                         threshold=gathered_core_genome_property_markers.get('TH'),
                                         parents=gathered_core_genome_property_markers.get('PN'),
                                         description=gathered_core_genome_property_markers.get('CC'),
                                         private_notes=gathered_core_genome_property_markers.get('**'),
                                         references=references,
                                         databases=databases,
                                         steps=steps)

    # Give each step a back-reference to its parent property.
    for step in new_genome_property.steps:
        step.parent = new_genome_property

    return new_genome_property
def parse_database_references(genome_property_record):
    """
    Parses database references from a genome properties record.

    :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.
    :return: A list of DatabaseReference objects.
    """
    def build_reference(fields):
        # Assemble a DatabaseReference from the accumulated marker fields.
        return DatabaseReference(record_title=fields.get('DC'),
                                 database_name=fields.get('DN'),
                                 record_ids=fields.get('DI'))

    database_reference_markers = ('DC', 'DR')
    database_references = []
    current_fields = {}
    for marker, content in genome_property_record:
        if marker not in database_reference_markers:
            continue
        if marker in current_fields:
            # A repeated marker signals the start of a new reference.
            database_references.append(build_reference(current_fields))
            current_fields = {marker: content}
        else:
            if marker == 'DR':
                # 'DR' rows look like "DatabaseName; ID1; ID2;".
                pieces = [piece.strip() for piece in content.split(';') if piece]
                current_fields['DN'] = pieces[0]
                current_fields['DI'] = pieces[1:]
            current_fields[marker] = content
    database_references.append(build_reference(current_fields))
    return database_references
def parse_literature_references(genome_property_record):
    """
    Parses literature references from a genome properties record.

    :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.
    :return: A list of LiteratureReference objects.
    """
    def build_reference(fields):
        # Assemble a LiteratureReference from the accumulated marker fields.
        return LiteratureReference(number=fields.get('RN'),
                                   pubmed_id=fields.get('RM'),
                                   title=fields.get('RT'),
                                   authors=fields.get('RA'),
                                   citation=fields.get('RL'))

    # Record markers related to literature references.
    literature_reference_markers = ('RN', 'RM', 'RT', 'RA', 'RL')
    literature_references = []
    current_fields = {}
    for marker, content in genome_property_record:
        if marker not in literature_reference_markers:
            continue
        if marker == 'RN':
            # Reference numbers are stored as "[1]"; keep just the integer.
            content = int(content.strip('[]'))
        if marker in current_fields:
            # A repeated marker signals the start of a new reference.
            literature_references.append(build_reference(current_fields))
            current_fields = {marker: content}
        else:
            current_fields[marker] = content
    literature_references.append(build_reference(current_fields))
    return literature_references
def parse_steps(genome_property_record):
    """
    Parses steps from a genome properties record.

    :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.
    :return: A list of Step objects.
    """
    step_markers = ('SN', 'ID', 'DN', 'RQ', 'EV', 'TG')
    steps = []
    pending_rows = []
    step_number = 0
    for marker, content in genome_property_record:
        if marker not in step_markers:
            continue
        if marker == 'SN':
            if pending_rows:
                # Flush the rows collected for the previous step.
                elements = parse_functional_elements(pending_rows)
                steps.append(Step(number=step_number, functional_elements=elements))
                pending_rows = []
            step_number = int(content)
        else:
            pending_rows.append((marker, content))
    # The last step ends at the end of the record rather than at an 'SN' row.
    elements = parse_functional_elements(pending_rows)
    steps.append(Step(number=step_number, functional_elements=elements))
    return steps
def parse_functional_elements(genome_property_record):
    """
    Parses functional_elements from a genome properties record.

    :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.
    :return: A list of functional_element objects.
    """
    functional_element_markers = ('ID', 'DN', 'RQ')
    functional_elements = []
    # Fields gathered for the functional element currently being read.
    current_functional_element = {}
    evidence_markers = ('EV', 'TG')
    # Evidence rows collected for the current functional element.
    current_evidence = []
    for marker, content in genome_property_record:
        if marker in functional_element_markers:
            if marker in current_functional_element:
                # A repeated marker signals a new functional element: flush the
                # evidence and element gathered so far before starting over.
                found_evidence = parse_evidences(current_evidence)
                current_evidence = []
                functional_elements.append(FunctionalElement(identifier=current_functional_element.get('ID'),
                                                             name=current_functional_element.get('DN'),
                                                             required=current_functional_element.get('RQ'),
                                                             evidence=found_evidence))
                current_functional_element = {marker: content}
            else:
                if marker == 'RQ':  # An 'RQ' (required) content of 1 means True.
                    if int(content) == 1:
                        content = True
                    else:
                        content = False
                current_functional_element[marker] = content
        elif marker in evidence_markers:
            current_evidence.append((marker, content))
        else:
            continue  # Move on if marker is not a functional element marker or evidence marker.
    # Flush the final functional element (terminated by end-of-record).
    if current_evidence:
        evidence = parse_evidences(current_evidence)
    else:
        evidence = None
    functional_elements.append(FunctionalElement(identifier=current_functional_element.get('ID'),
                                                 name=current_functional_element.get('DN'),
                                                 required=current_functional_element.get('RQ'),
                                                 evidence=evidence))
    return functional_elements
def parse_evidences(genome_property_record):
    """
    Parses evidences from a genome properties record.

    :param genome_property_record: A list of marker, content tuples representing genome property flat file lines.
    :return: A list of evidence objects.
    """
    evidence_markers = ('EV', 'TG')
    evidences = []
    current_fields = {}
    for marker, content in genome_property_record:
        if marker not in evidence_markers:
            continue
        if marker in current_fields:
            # A repeated marker signals the start of a new evidence entry.
            evidences.append(parse_single_evidence(current_fields))
            current_fields = {marker: content}
        else:
            current_fields[marker] = content
    evidences.append(parse_single_evidence(current_fields))
    return evidences
def parse_single_evidence(current_evidence_dictionary):
    """
    Creates an Evidence object from a pair of EV and TG tag content strings.

    :param current_evidence_dictionary: A dictionary containing EV and TG to content string mappings.
    :return: An Evidence object.
    """
    evidence_string = current_evidence_dictionary.get('EV')
    gene_ontology_string = current_evidence_dictionary.get('TG')

    evidence_identifiers = None
    sufficient = False
    if evidence_string:
        evidence_identifiers = extract_identifiers(evidence_string)
        # The literal keyword 'sufficient' in the EV tag flags the evidence.
        sufficient = 'sufficient' in evidence_string

    gene_ontology_identifiers = (extract_identifiers(gene_ontology_string)
                                 if gene_ontology_string else None)

    return Evidence(evidence_identifiers=evidence_identifiers,
                    gene_ontology_terms=gene_ontology_identifiers,
                    sufficient=sufficient)
def extract_identifiers(identifier_string):
    """
    Parses database or Genprop identifiers from an EV or TG tag content string.

    :param identifier_string: The contents string from a EV or TG tag.
    :return: A list of identifiers (the 'sufficient' keyword is excluded).
    """
    stripped_pieces = (piece.strip()
                       for piece in identifier_string.split(';') if piece)
    return [identifier for identifier in stripped_pieces
            if identifier != 'sufficient']
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__doc__ = """
Implementation of the standard AST output converters.
"""
__all__ = (
'TermBuilder', 'LiteralTermBuilder',
'InfixTermBuilder', 'PrefixTermBuilder', 'PostfixTermBuilder',
'tree_converters'
)
try:
from psyco.classes import *
except ImportError:
pass
from itertools import *
from mathml.termparser import (ConverterRegistry,
TERM_OPERATOR_ORDER, BOOL_CMP_OPERATORS)
import collections
class TermBuilder(object):
    """Abstract superclass for term builders."""

    # All known operators, ordered by precedence (term operators first,
    # then comparisons, then boolean connectives).
    OPERATOR_ORDER = list(op for ops in (TERM_OPERATOR_ORDER, '| in',
                                         BOOL_CMP_OPERATORS, 'and xor or')
                          for op in ops.split())
    OPERATOR_SET = frozenset(OPERATOR_ORDER)
    _OPERATOR_MAP = {}

    def __init__(self):
        self.__dispatcher = self._register_handlers({})
        self.__map_operator = self._OPERATOR_MAP.get

    def _register_handlers(self, dispatcher_dict):
        """Subclasses can modify the dictionary returned by this
        method to register additional handlers.
        Note that all handler methods must return iterables!"""
        for name in dir(self):
            if name.startswith('_handle_'):
                method = getattr(self, name)
                # Use the callable() builtin: `collections.Callable` was an
                # alias removed in Python 3.10 (it lives in collections.abc).
                if callable(method):
                    dispatcher_dict[name[8:]] = method
        return dispatcher_dict

    def build(self, tree):
        "Call this method to build the term representation."
        status = self._init_build_status()
        return ' '.join(self._recursive_build(tree, status))

    def _init_build_status(self):
        "To be overwritten by subclasses."
        return None

    def _map_operator(self, operator):
        "To be overwritten by subclasses."
        return self.__map_operator(operator, operator)

    def _build_children(self, operator, children, status):
        # Name and constant nodes hold raw values, not subtrees to rebuild.
        if operator == 'name' or operator[:6] == 'const:':
            return children
        return [' '.join(operand)
                for operand in map(self._recursive_build, children, repeat(status))]

    def _handle(self, operator, operands, status):
        "Unknown operators (including functions) end up here."
        raise NotImplementedError("_handle(%s)" % operator)

    def _handleOP(self, operator, operands, status):
        "Arithmetic and boolean operators end up here. Default is to call self._handle()"
        return self._handle(operator, operands, status)

    def _recursive_build(self, tree, status):
        dispatcher = self.__dispatcher
        operator = tree[0]
        operands = self._build_children(operator, tree[1:], status)
        # Exact handler first: 'const:bool' dispatches to _handle_const_bool.
        dispatch_name = operator.replace(':', '_')  # const:*, list:*
        dispatch = dispatcher.get(dispatch_name)
        if dispatch:
            return dispatch(operator, operands, status)
        # Fall back to the handler for the operator family before the colon.
        splitpos = operator.find(':')
        if splitpos > 0:
            dispatch = dispatcher.get(operator[:splitpos])
            if dispatch:
                return dispatch(operator, operands, status)
        if operator in self.OPERATOR_SET:
            return self._handleOP(operator, operands, status)
        else:
            return self._handle(operator, operands, status)
class LiteralTermBuilder(TermBuilder):
    "Abstract superclass for literal term builders."

    # Bracket notation for each interval kind; filled with 'lo,hi'.
    _INTERVAL_NOTATION = {
        'closed'      : '[%s]',
        'closed-open' : '[%s)',
        'open-closed' : '(%s]',
        'open'        : '(%s)'
        }

    # Optional renaming of identifiers on output; subclasses may fill this in.
    _NAME_MAP = {}

    def _handle_name(self, operator, operands, affin):
        """Emit a name, mapped through _NAME_MAP when an entry exists."""
        name = str(operands[0])
        return [self._NAME_MAP.get(name, name)]

    def _handle_const_bool(self, operator, operands, status):
        """Emit a boolean constant as 'true'/'false'."""
        return ['true' if operands[0] else 'false']

    def _handle_const_complex(self, operator, operands, status):
        """Emit a complex constant as '(re+imi)' (sign omitted when negative)."""
        value = operands[0]
        sign = '+' if value.imag >= 0 else ''
        return ['(%s%s%si)' % (value.real_str, sign, value.imag_str)]

    def _handle_const_rational(self, operator, operands, status):
        """Emit a rational constant as '(num/denom)'."""
        value = operands[0]
        return ['(%s/%s)' % (value.num_str, value.denom_str)]

    def _handle_const_enotation(self, operator, operands, status):
        """Emit an E-notation constant unchanged."""
        return [str(operands[0])]

    def _handle_const(self, operator, operands, status):
        """Emit any other constant, lower-cased."""
        return [str(operands[0]).lower()]

    def _handle_list(self, operator, operands, status):
        """Emit a list as '(a,b,...)'."""
        assert operator == 'list'
        return ['(%s)' % ','.join(operands)]

    def _handle_interval(self, operator, operands, status):
        """Emit an interval using open/closed bracket notation."""
        assert operator[:9] == 'interval:'
        notation = self._INTERVAL_NOTATION[operator[9:]]
        return [notation % ','.join(operands)]
class InfixTermBuilder(LiteralTermBuilder):
    "TermBuilder that converts the parse tree into a literal infix term."
    # "affin" (affinity) is an operator-precedence rank: the index of the
    # operator in TermBuilder.OPERATOR_ORDER.  MAX_AFFIN is a sentinel rank
    # weaker than any real operator, used at the top level and for 'case'.
    MAX_AFFIN = len(TermBuilder.OPERATOR_ORDER)+1
    __operator_order = TermBuilder.OPERATOR_ORDER.index
    def _init_build_status(self):
        # Build status is the pair (current affinity, parent affinity).
        return (self.MAX_AFFIN, self.MAX_AFFIN)
    def _find_affin(self, operator, affin_status):
        # Compute (affinity of *operator*, parent affinity), where the
        # parent affinity is the caller's current affinity.
        try:
            affin = self.__operator_order(operator)
        except ValueError:
            if operator == 'case':
                affin = self.MAX_AFFIN
            else:
                # NOTE(review): this assigns the whole (affin, parent) tuple,
                # so the returned pair nests a tuple in its first slot and a
                # later ``my_affin >= parent_affin`` comparison would raise
                # on Python 3 -- possibly intended to be affin_status[0];
                # confirm against upstream before changing.
                affin = affin_status
        return (affin, affin_status[0])
    def _build_children(self, operator, children, affin_status):
        # Unary minus binds tightest: force affinity 0 for its operand.
        if operator == '-' and len(children) == 1:
            affin = (0, affin_status[0])
        else:
            affin = self._find_affin(operator, affin_status)
        return super(InfixTermBuilder, self)._build_children(operator, children, affin)
    def _handle_case(self, operator, operands, affin_status):
        # Renders: CASE WHEN cond THEN value [ELSE other] END
        assert operator == 'case'
        result = [ 'CASE', 'WHEN', operands[0], 'THEN', operands[1] ]
        if len(operands) > 2:
            result.append('ELSE')
            result.append(operands[2])
        result.append('END')
        return result
    def _handleOP(self, operator, operands, affin_status):
        output_operator = self._map_operator(operator)
        my_affin, parent_affin = self._find_affin(operator, affin_status)
        if my_affin >= parent_affin:
            # Binds no tighter than the parent: parenthesize.
            if len(operands) == 1:
                return ['(', output_operator, operands[0], ')'] # safe bet
            else:
                # Interleave as '(', op0, OP, op1, OP, ..., opN, ')':
                # chain('(', repeat(OP)) yields '(' once, then OP forever,
                # and zip() pairs those with the operands in order.
                return chain(chain(*list(zip(chain('(', repeat(output_operator)), operands))), ')')
        else:
            # Binds tighter than the parent: no parentheses needed.
            if len(operands) == 1:
                return [output_operator, operands[0]]
            else:
                # Interleave as op0, OP, op1, OP, ..., opN.
                return chain((operands[0],), chain(*list(zip(chain(repeat(output_operator)),
                                                             islice(operands, 1, None)))))
    def _handle(self, operator, operands, affin_status):
        # Unknown operators are rendered as function calls: name(a, b, ...).
        return [ self._map_operator(operator), '(', ', '.join(operands), ')' ]
class PostfixTermBuilder(LiteralTermBuilder):
    "TermBuilder that converts the parse tree into a literal postfix term."

    def _handle_case(self, operator, operands, _):
        """Emit a CASE construct in postfix order: reversed operands, then the operator."""
        assert operator == 'case'
        has_else = len(operands) > 2
        case_op = 'CASE_THEN_ELSE' if has_else else 'CASE_THEN'
        return chain(reversed(operands), (self._map_operator(case_op),))

    def _handle(self, operator, operands, _):
        """Emit the operands followed by n-1 copies of the operator.

        Unary minus is emitted as the special token '+-'.
        """
        if operator == '-' and len(operands) == 1:
            return [operands[0], self._map_operator('+-')]
        count = max(1, len(operands) - 1)
        return chain(operands, repeat(self._map_operator(operator), count))
class PrefixTermBuilder(LiteralTermBuilder):
    "TermBuilder that converts the parse tree into a literal prefix term."
    def _handle_case(self, operator, operands, _):
        """Emit a CASE construct in prefix order: the operator, then reversed operands."""
        assert operator == 'case'
        if len(operands) > 2:
            operator = 'CASE_THEN_ELSE'
        else:
            operator = 'CASE_THEN'
        return chain((self._map_operator(operator),), reversed(operands))
    def _handle(self, operator, operands, _):
        """Emit n-1 copies of the operator, then the operands.

        Unary minus is emitted as the special token '+-'.
        """
        if operator == '-' and len(operands) == 1:
            # BUG FIX: prefix notation places the operator *before* its
            # operand, and the n-ary branch below does exactly that.  The
            # original returned [operand, '+-'] (postfix order), apparently
            # copy-pasted from PostfixTermBuilder._handle.
            return [ self._map_operator('+-'), operands[0] ]
        return chain(repeat(self._map_operator(operator), max(1, len(operands)-1)), operands)
# converter registry:
class TermGeneration(ConverterRegistry):
    "Objects of this class are used to reference the different converters."

    # Name of the method the registry invokes on registered converters.
    _METHOD_NAME = 'build'

    def convert_tree(self, tree, output_type):
        """Convert a parse tree into a term of the given output type."""
        builder = self._converters[output_type]
        return builder.build(tree)
# Module-level registry: one ready-to-use builder per output format.
# Usage: tree_converters.convert_tree(tree, 'infix') etc.
tree_converters = TermGeneration()
tree_converters.register_converter('infix', InfixTermBuilder())
tree_converters.register_converter('prefix', PrefixTermBuilder())
tree_converters.register_converter('postfix', PostfixTermBuilder())
| |
import numpy as np
import fitsio
import os
import logging
logger = logging.getLogger('legacypipe.outliers')
def info(*args):
    """Log *args* at INFO level through this module's logger."""
    from legacypipe.utils import log_info
    log_info(logger, args)
def debug(*args):
    """Log *args* at DEBUG level through this module's logger."""
    from legacypipe.utils import log_debug
    log_debug(logger, args)
from legacypipe.bits import OUTLIER_POS, OUTLIER_NEG
def get_bits_to_mask():
    """Return the outlier-mask bits (positive and negative) that get applied to images."""
    return OUTLIER_POS | OUTLIER_NEG
def read_outlier_mask_file(survey, tims, brickname, subimage=True, output=True, ps=None,
                           pos_neg_mask=None,
                           outlier_mask_file=None, apply_masks=True, get_headers=False):
    '''Read the per-brick outlier-mask FITS file and optionally apply it to *tims*.

    if subimage=True, assume that 'tims' are subimages, and demand that they have the same
    x0,y0 pixel offsets and size as the outlier mask files.
    if subimage=False, assume that 'tims' are full-CCD images, and
    apply the mask to the relevant subimage.
    *output* determines where we search for the file: treating it as output, or input?
    (output='both' tries the output location first, then falls back to input.)

    Returns False on any failure (missing file or extension, shape/offset
    mismatch); otherwise True, or the list of per-extension headers when
    get_headers=True.  If *pos_neg_mask* is given, the raw mask values are
    OR-ed into it in addition to (or instead of) being applied.
    '''
    from legacypipe.bits import DQ_BITS
    headers = []
    if outlier_mask_file is None:
        if output == 'both':
            # Try both output=True and then output=False.
            fn = survey.find_file('outliers_mask', brick=brickname, output=True)
            if not os.path.exists(fn):
                fn2 = survey.find_file('outliers_mask', brick=brickname, output=False)
                print('Outlier mask does not exist:', fn)
                print('Trying outlier mask:', fn2)
                fn = fn2
        else:
            fn = survey.find_file('outliers_mask', brick=brickname, output=output)
    else:
        fn = outlier_mask_file
    if not os.path.exists(fn):
        print('Failed to apply outlier mask: No such file:', fn)
        return False
    F = fitsio.FITS(fn)
    for tim in tims:
        # One FITS extension per CCD, named "camera-expnum-ccdname".
        extname = '%s-%s-%s' % (tim.imobj.camera, tim.imobj.expnum, tim.imobj.ccdname)
        if not extname in F:
            print('WARNING: Did not find extension', extname, 'in outlier-mask file', fn)
            return False
        mask = F[extname].read()
        hdr = F[extname].read_header()
        if subimage:
            # Subimage mode: the mask must exactly match this tim.
            if mask.shape != tim.shape:
                print('Warning: Outlier mask', fn, 'does not match shape of tim', tim)
                return False
        if get_headers:
            headers.append(hdr)
        # X0,Y0: pixel offset of the masked subimage within the full CCD.
        x0 = hdr['X0']
        y0 = hdr['Y0']
        maskbits = get_bits_to_mask()
        if subimage:
            if x0 != tim.x0 or y0 != tim.y0:
                print('Warning: Outlier mask', fn, 'x0,y0 does not match that of tim', tim)
                return False
            if apply_masks:
                # Apply this mask!
                tim.dq |= tim.dq_type(((mask & maskbits) > 0) * DQ_BITS['outlier'])
                tim.inverr[(mask & maskbits) > 0] = 0.
            if pos_neg_mask is not None:
                pos_neg_mask |= mask
        else:
            from astrometry.util.miscutils import get_overlapping_region
            mh,mw = mask.shape
            th,tw = tim.shape
            # Overlap between the mask (in full-CCD coords) and this tim.
            my,ty = get_overlapping_region(tim.y0, tim.y0 + th - 1, y0, y0 + mh - 1)
            mx,tx = get_overlapping_region(tim.x0, tim.x0 + tw - 1, x0, x0 + mw - 1)
            if my == [] or mx == []:
                # no overlap
                continue
            # have to shift the "m" slices down by x0,y0
            my = slice(my.start - y0, my.stop - y0)
            mx = slice(mx.start - x0, mx.stop - x0)
            if apply_masks:
                # Apply this mask!
                tim.dq[ty, tx] |= tim.dq_type(((mask[my, mx] & maskbits) > 0) * DQ_BITS['outlier'])
                tim.inverr[ty, tx][(mask[my, mx] & maskbits) > 0] = 0.
            if pos_neg_mask is not None:
                pos_neg_mask[ty,tx] |= mask[my, mx]
            if ps is not None:
                # Debug plots: show the overlap geometry and masked pixels.
                import pylab as plt
                print('Mask extent: x [%i, %i], vs tim extent x [%i, %i]' % (x0, x0+mw, tim.x0, tim.x0+tw))
                print('Mask extent: y [%i, %i], vs tim extent y [%i, %i]' % (y0, y0+mh, tim.y0, tim.y0+th))
                print('x slice: mask', mx, 'tim', tx)
                print('y slice: mask', my, 'tim', ty)
                print('tim shape:', tim.shape)
                print('mask shape:', mask.shape)
                newdq = np.zeros(tim.shape, bool)
                newdq[ty, tx] = ((mask[my, mx] & maskbits) > 0)
                print('Total of', np.sum(newdq), 'pixels masked')
                plt.clf()
                plt.imshow(tim.getImage(), interpolation='nearest', origin='lower', vmin=-2.*tim.sig1, vmax=5.*tim.sig1, cmap='gray')
                ax = plt.axis()
                from legacypipe.detection import plot_boundary_map
                plot_boundary_map(newdq, iterations=3, rgb=(0,128,255))
                plt.axis(ax)
                ps.savefig()
                plt.axis([tx.start, tx.stop, ty.start, ty.stop])
                ps.savefig()
    if get_headers:
        return headers
    return True
def mask_outlier_pixels(survey, tims, bands, targetwcs, brickname, version_header,
                        mp=None, plots=False, ps=None, make_badcoadds=True,
                        refstars=None):
    '''Detect and mask outlier pixels (cosmic rays, satellite trails, ...).

    Per band, each image is blurred to a common PSF width and compared to
    a weighted coadd of the *other* images in that band; strongly
    deviating pixels get OUTLIER_POS/OUTLIER_NEG set in their mask,
    their inverse-error zeroed, DQ_BITS['outlier'] set in tim.dq, and the
    per-CCD masks are written to the brick's "outliers_mask" output file.

    Pixels near Gaia reference stars and near SATUR/BLEED pixels are
    vetoed (never flagged), since bright-star cores vary between exposures.

    Returns (badcoadds_pos, badcoadds_neg): per-band mean coadds of the
    masked pixel values, or (None, None) when make_badcoadds=False.
    '''
    from legacypipe.bits import DQ_BITS
    # FIX: scipy.ndimage.morphology is a deprecated alias module (removed
    # in recent SciPy); import from scipy.ndimage directly.
    from scipy.ndimage import binary_dilation
    H,W = targetwcs.shape
    if make_badcoadds:
        badcoadds_pos = []
        badcoadds_neg = []
    else:
        badcoadds_pos = None
        badcoadds_neg = None
    # Veto map: brick pixels where outliers must NOT be flagged.
    # FIX: np.bool (alias of builtin bool) was removed in NumPy 1.24.
    star_veto = np.zeros(targetwcs.shape, bool)
    if refstars:
        gaia = refstars[refstars.isgaia]
        # Not moving Gaia stars to epoch of individual images...
        _,bx,by = targetwcs.radec2pixelxy(gaia.ra, gaia.dec)
        bx -= 1.
        by -= 1.
        # Radius to mask around Gaia stars, in arcsec
        radius = 1.0
        pixrad = radius / targetwcs.pixel_scale()
        for x,y in zip(bx,by):
            xlo = int(np.clip(np.floor(x - pixrad), 0, W-1))
            xhi = int(np.clip(np.ceil (x + pixrad), 0, W-1))
            ylo = int(np.clip(np.floor(y - pixrad), 0, H-1))
            yhi = int(np.clip(np.ceil (y + pixrad), 0, H-1))
            if xlo == xhi or ylo == yhi:
                continue
            # Squared distance from the star center for each pixel in the box.
            r2 = (((np.arange(ylo,yhi+1) - y)**2)[:,np.newaxis] +
                  ((np.arange(xlo,xhi+1) - x)**2)[np.newaxis,:])
            # BUG FIX: r2 is a *squared* distance, so it must be compared to
            # the squared radius; the original compared r2 < pixrad, vetoing
            # a region of radius sqrt(pixrad) pixels instead of pixrad.
            star_veto[ylo:yhi+1, xlo:xhi+1] |= (r2 < pixrad**2)
    with survey.write_output('outliers_mask', brick=brickname) as out:
        # empty Primary HDU
        out.fits.write(None, header=version_header)
        for band in bands:
            btims = [tim for tim in tims if tim.band == band]
            if len(btims) == 0:
                continue
            debug(len(btims), 'images for band', band)
            H,W = targetwcs.shape
            # Build a blurred reference coadd: blur every image up to a
            # common target PSF width slightly broader than the worst one.
            sigs = np.array([tim.psf_sigma for tim in btims])
            debug('PSF sigmas:', sigs)
            targetsig = max(sigs) + 0.5
            addsigs = np.sqrt(targetsig**2 - sigs**2)
            debug('Target sigma:', targetsig)
            debug('Blur sigmas:', addsigs)
            coimg = np.zeros((H,W), np.float32)
            cow   = np.zeros((H,W), np.float32)
            masks = np.zeros((H,W), np.int16)
            results = mp.imap_unordered(
                blur_resample_one, [(i_btim,tim,sig,targetwcs)
                                    for i_btim,(tim,sig) in enumerate(zip(btims,addsigs))])
            for i_btim,r in results:
                if r is None:
                    continue
                Yo,Xo,iacc,wacc,macc = r
                coimg[Yo,Xo] += iacc
                cow  [Yo,Xo] += wacc
                masks[Yo,Xo] |= macc
                del Yo,Xo,iacc,wacc,macc
                del r
            del results
            # Also veto near (dilated) bleed trails and saturated cores.
            veto = np.logical_or(star_veto,
                                 np.logical_or(
                                     binary_dilation(masks & DQ_BITS['bleed'], iterations=3),
                                     binary_dilation(masks & DQ_BITS['satur'], iterations=10)))
            del masks
            # Compare each image to the coadd (of the other images).
            R = mp.imap_unordered(
                compare_one, [(i_btim, tim, sig, targetwcs, coimg, cow, veto, make_badcoadds, plots,ps)
                              for i_btim,(tim,sig) in enumerate(zip(btims,addsigs))])
            del coimg, cow, veto
            badcoadd_pos = None
            badcoadd_neg = None
            if make_badcoadds:
                badcoadd_pos = np.zeros((H,W), np.float32)
                badcon_pos   = np.zeros((H,W), np.int16)
                badcoadd_neg = np.zeros((H,W), np.float32)
                badcon_neg   = np.zeros((H,W), np.int16)
            for i_btim,r in R:
                tim = btims[i_btim]
                if r is None:
                    # none masked
                    mask = np.zeros(tim.shape, np.uint8)
                else:
                    mask,badco = r
                    if make_badcoadds:
                        # Accumulate masked pixel values into the bad coadds.
                        badhot, badcold = badco
                        yo,xo,bimg = badhot
                        badcoadd_pos[yo, xo] += bimg
                        badcon_pos  [yo, xo] += 1
                        yo,xo,bimg = badcold
                        badcoadd_neg[yo, xo] += bimg
                        badcon_neg  [yo, xo] += 1
                        del yo,xo,bimg, badhot,badcold
                    del badco
                    del r
                # Apply the mask!
                maskbits = get_bits_to_mask()
                tim.inverr[(mask & maskbits) > 0] = 0.
                tim.dq[(mask & maskbits) > 0] |= tim.dq_type(DQ_BITS['outlier'])
                # Write output!  One extension per CCD, with the subimage
                # offset recorded in X0/Y0.
                from legacypipe.utils import copy_header_with_wcs
                hdr = copy_header_with_wcs(None, tim.subwcs)
                hdr.add_record(dict(name='IMTYPE', value='outlier_mask',
                                    comment='LegacySurvey image type'))
                hdr.add_record(dict(name='CAMERA',  value=tim.imobj.camera))
                hdr.add_record(dict(name='EXPNUM',  value=tim.imobj.expnum))
                hdr.add_record(dict(name='CCDNAME', value=tim.imobj.ccdname))
                hdr.add_record(dict(name='X0', value=tim.x0))
                hdr.add_record(dict(name='Y0', value=tim.y0))
                # Compression choice, by measured file size:
                # HCOMPRESS: 943k; GZIP_1: 4.4M; GZIP: 4.4M; RICE: 2.8M
                extname = '%s-%s-%s' % (tim.imobj.camera, tim.imobj.expnum, tim.imobj.ccdname)
                out.fits.write(mask, header=hdr, extname=extname, compress='HCOMPRESS')
            del R
            if make_badcoadds:
                # Convert sums into means over the contributing images.
                badcoadd_pos /= np.maximum(badcon_pos, 1)
                badcoadd_neg /= np.maximum(badcon_neg, 1)
                del badcon_pos, badcon_neg
                badcoadds_pos.append(badcoadd_pos)
                badcoadds_neg.append(badcoadd_neg)
    return badcoadds_pos,badcoadds_neg
def compare_one(X):
    """Compare one blurred tim against the coadd of the other images; flag outliers.

    X is the tuple (i_tim, tim, sig, targetwcs, coimg, cow, veto,
    make_badcoadds, plots, ps): coimg/cow are the weighted blurred coadd
    and its weight map (which include this image, subtracted off below),
    and *veto* marks brick pixels that must never be flagged.

    Returns (i_tim, None) when there is no overlap or no outliers;
    otherwise (i_tim, (maskedpix, badco)) where maskedpix is a uint8 map
    in tim pixel coordinates holding OUTLIER_POS/OUTLIER_NEG values, and
    badco is (badhot, badcold) masked-pixel lists for the bad coadds
    (None when make_badcoadds is False).
    """
    # FIX: scipy.ndimage.filters / scipy.ndimage.morphology are deprecated
    # alias modules (removed in recent SciPy); import from scipy.ndimage.
    from scipy.ndimage import gaussian_filter, binary_dilation
    from astrometry.util.resample import resample_with_wcs,OverlapError
    (i_tim,tim,sig,targetwcs, coimg,cow, veto, make_badcoadds, plots,ps) = X
    if plots:
        import pylab as plt
    H,W = targetwcs.shape
    # Blur this image up to the common target PSF width, then resample
    # onto the brick WCS.
    img = gaussian_filter(tim.getImage(), sig)
    try:
        Yo,Xo,Yi,Xi,[rimg] = resample_with_wcs(
            targetwcs, tim.subwcs, [img], intType=np.int16)
    except OverlapError:
        return i_tim,None
    del img
    # Weight normalization for the unit-Gaussian blur applied above.
    blurnorm = 1./(2. * np.sqrt(np.pi) * sig)
    wt = tim.getInvvar()[Yi,Xi] / np.float32(blurnorm**2)
    if Xi.dtype != np.int16:
        Yi = Yi.astype(np.int16)
        Xi = Xi.astype(np.int16)
    # Compare against reference image...
    maskedpix = np.zeros(tim.shape, np.uint8)
    # Subtract this image from the coadd to get the "other images" stack.
    otherwt = cow[Yo,Xo] - wt
    otherimg = (coimg[Yo,Xo] - rimg*wt) / np.maximum(otherwt, 1e-16)
    this_sig1 = 1./np.sqrt(np.median(wt[wt>0]))
    ## FIXME -- this image edges??
    # Compute the error on our estimate of (thisimg - co) =
    # sum in quadrature of the errors on thisimg and co.
    with np.errstate(divide='ignore'):
        diffvar = 1./wt + 1./otherwt
        sndiff = (rimg - otherimg) / np.sqrt(diffvar)
    with np.errstate(divide='ignore'):
        reldiff = ((rimg - otherimg) / np.maximum(otherimg, this_sig1))
    if plots:
        # Diagnostic panel: other-image stack, weights, S/N and relative diffs.
        plt.clf()
        showimg = np.zeros((H,W),np.float32)
        showimg[Yo,Xo] = otherimg
        plt.subplot(2,3,1)
        plt.imshow(showimg, interpolation='nearest', origin='lower', vmin=-0.01, vmax=0.1,
                   cmap='gray')
        plt.title('other images')
        showimg[Yo,Xo] = otherwt
        plt.subplot(2,3,2)
        plt.imshow(showimg, interpolation='nearest', origin='lower', vmin=0)
        plt.title('other wt')
        showimg[Yo,Xo] = sndiff
        plt.subplot(2,3,3)
        plt.imshow(showimg, interpolation='nearest', origin='lower', vmin=-10, vmax=10,cmap='RdBu_r')
        plt.title('S/N diff')
        showimg[Yo,Xo] = rimg
        plt.subplot(2,3,4)
        plt.imshow(showimg, interpolation='nearest', origin='lower', vmin=-0.01, vmax=0.1,
                   cmap='gray')
        plt.title('this image')
        showimg[Yo,Xo] = wt
        plt.subplot(2,3,5)
        plt.imshow(showimg, interpolation='nearest', origin='lower', vmin=0)
        plt.title('this wt')
        plt.suptitle(tim.name)
        showimg[Yo,Xo] = reldiff
        plt.subplot(2,3,6)
        plt.imshow(showimg, interpolation='nearest', origin='lower', vmin=-4, vmax=4, cmap='RdBu_r')
        plt.title('rel diff')
        ps.savefig()
        # from astrometry.util.plotutils import loghist
        # plt.clf()
        # loghist(sndiff.ravel(), reldiff.ravel(),
        #         bins=100)
        # plt.xlabel('S/N difference')
        # plt.ylabel('Relative difference')
        # plt.title('Outliers: ' + tim.name)
        # ps.savefig()
    del otherimg
    # Significant pixels: require both strong S/N and a large relative
    # deviation, with valid weights, outside the veto regions.
    hotpix = ((sndiff > 5.) * (reldiff > 2.) *
              (otherwt > 1e-16) * (wt > 0.) *
              (veto[Yo,Xo] == False))
    coldpix = ((sndiff < -5.) * (reldiff < -2.) *
               (otherwt > 1e-16) * (wt > 0.) *
               (veto[Yo,Xo] == False))
    del reldiff, otherwt
    if (not np.any(hotpix)) and (not np.any(coldpix)):
        return i_tim,None
    hot = np.zeros((H,W), bool)
    hot[Yo,Xo] = hotpix
    cold = np.zeros((H,W), bool)
    cold[Yo,Xo] = coldpix
    del hotpix, coldpix
    snmap = np.zeros((H,W), np.float32)
    snmap[Yo,Xo] = sndiff
    hot = binary_dilation(hot, iterations=1)
    cold = binary_dilation(cold, iterations=1)
    if plots:
        heat = np.zeros(hot.shape, np.int8)
        heat += hot
        heat -= cold
    # "warm": grow the core detections into nearby moderately significant
    # (|S/N| > 3) pixels.
    hot = np.logical_or(hot,
                        binary_dilation(hot, iterations=5) * (snmap > 3.))
    hot = binary_dilation(hot, iterations=1)
    cold = np.logical_or(cold,
                         binary_dilation(cold, iterations=5) * (snmap < -3.))
    cold = binary_dilation(cold, iterations=1)
    if plots:
        heat += hot
        heat -= cold
    # "lukewarm": one more growth step at |S/N| > 2, plus extra padding.
    hot = np.logical_or(hot,
                        binary_dilation(hot, iterations=5) * (snmap > 2.))
    hot = binary_dilation(hot, iterations=3)
    cold = np.logical_or(cold,
                         binary_dilation(cold, iterations=5) * (snmap < -2.))
    cold = binary_dilation(cold, iterations=3)
    if plots:
        heat += hot
        heat -= cold
        plt.clf()
        plt.imshow(heat, interpolation='nearest', origin='lower', cmap='RdBu_r', vmin=-3, vmax=+3)
        plt.title(tim.name + ': outliers')
        ps.savefig()
        del heat
    del snmap
    badco = None
    if make_badcoadds:
        # Record the masked pixel values (in brick coords) for the bad coadds.
        bad, = np.nonzero(hot[Yo,Xo])
        badhot = (Yo[bad], Xo[bad], tim.getImage()[Yi[bad],Xi[bad]])
        bad, = np.nonzero(cold[Yo,Xo])
        badcold = (Yo[bad], Xo[bad], tim.getImage()[Yi[bad],Xi[bad]])
        badco = badhot,badcold
    # Actually do the masking!
    # Resample "hot" (in brick coords) back to tim coords.
    try:
        mYo,mXo,mYi,mXi,_ = resample_with_wcs(
            tim.subwcs, targetwcs, intType=np.int16)
    except OverlapError:
        return i_tim,None
    Ibad, = np.nonzero(hot[mYi,mXi])
    Ibad2, = np.nonzero(cold[mYi,mXi])
    info(tim, ': masking', len(Ibad), 'positive outlier pixels and', len(Ibad2), 'negative outlier pixels')
    maskedpix[mYo[Ibad], mXo[Ibad]] = OUTLIER_POS
    maskedpix[mYo[Ibad2], mXo[Ibad2]] = OUTLIER_NEG
    return i_tim, (maskedpix,badco)
def blur_resample_one(X):
    """Blur one tim up to the target PSF width and resample onto the brick WCS.

    X is the tuple (i_tim, tim, sig, targetwcs), where *sig* is the extra
    Gaussian sigma needed to reach the common target PSF width.

    Returns (i_tim, None) when the image does not overlap the target WCS;
    otherwise (i_tim, (Yo, Xo, img*wt, wt, dq)) with Yo/Xo in brick pixel
    coordinates, ready to be accumulated into a weighted coadd.
    """
    # FIX: scipy.ndimage.filters is a deprecated alias module (removed in
    # recent SciPy); import gaussian_filter from scipy.ndimage directly.
    from scipy.ndimage import gaussian_filter
    from astrometry.util.resample import resample_with_wcs,OverlapError
    i_tim,tim,sig,targetwcs = X
    img = gaussian_filter(tim.getImage(), sig)
    try:
        Yo,Xo,Yi,Xi,[rimg] = resample_with_wcs(
            targetwcs, tim.subwcs, [img], intType=np.int16)
    except OverlapError:
        return i_tim, None
    del img
    # Weight normalization for the unit-Gaussian blur applied above.
    blurnorm = 1./(2. * np.sqrt(np.pi) * sig)
    wt = tim.getInvvar()[Yi,Xi] / (blurnorm**2)
    return i_tim, (Yo, Xo, rimg*wt, wt, tim.dq[Yi,Xi])
def patch_from_coadd(coimgs, targetwcs, bands, tims, mp=None):
    """Fill masked pixels (zero inverse-error) in each tim with
    nearest-neighbour samples from the per-band coadd images."""
    H, W = targetwcs.shape
    band_index = {b: i for i, b in enumerate(bands)}
    for tim in tims:
        inverr = tim.getInvError()
        img = tim.getImage()
        if not np.any(inverr == 0):
            continue
        # Pixels needing a patch: those with zero inverse-error.
        iy, ix = np.nonzero(inverr == 0)
        if len(iy) == 0:
            continue
        # Map tim pixels -> RA,Dec -> coadd pixels (nearest neighbour,
        # FITS-style 1-indexed WCS conversions).
        ra, dec = tim.subwcs.pixelxy2radec(ix + 1, iy + 1)[-2:]
        _, xx, yy = targetwcs.radec2pixelxy(ra, dec)
        xx = (xx - 1. + 0.5).astype(np.int16)
        yy = (yy - 1. + 0.5).astype(np.int16)
        inside = (xx >= 0) & (xx < W) & (yy >= 0) & (yy < H)
        if not np.any(inside):
            continue
        coimg = coimgs[band_index[tim.band]]
        img[iy[inside], ix[inside]] = coimg[yy[inside], xx[inside]]
| |
from typing import Optional, Union
from sanic.helpers import STATUS_CODES
class SanicException(Exception):
    """Base class for Sanic HTTP exceptions.

    Resolves the message from (in order): the explicit argument, the
    class-level ``message`` attribute, or the standard reason phrase for
    the status code.  Sets ``quiet`` to suppress traceback logging for
    everything except 500-level defaults.
    """

    message: str = ""

    def __init__(
        self,
        message: Optional[Union[str, bytes]] = None,
        status_code: Optional[int] = None,
        quiet: Optional[bool] = None,
    ) -> None:
        if message is None:
            if self.message:
                message = self.message
            elif status_code is not None:
                # Fall back to the standard HTTP reason phrase.
                msg: bytes = STATUS_CODES.get(status_code, b"")
                message = msg.decode("utf8")
        super().__init__(message)
        if status_code is not None:
            self.status_code = status_code
        # quiet=None means: choose by status (quiet for everything but 500).
        if quiet or (quiet is None and status_code not in (None, 500)):
            self.quiet = True
class NotFound(SanicException):
    """
    **Status**: 404 Not Found

    The server cannot find the requested resource.
    """
    status_code = 404
    quiet = True
class InvalidUsage(SanicException):
    """
    **Status**: 400 Bad Request

    The request was malformed or otherwise could not be processed.
    """
    status_code = 400
    quiet = True
class MethodNotSupported(SanicException):
    """
    **Status**: 405 Method Not Allowed

    Carries an ``Allow`` header listing the permitted HTTP methods.
    """

    status_code = 405
    quiet = True

    def __init__(self, message, method, allowed_methods):
        # *method* (the rejected verb) is accepted for API compatibility;
        # only the allowed methods are reflected in the response header.
        super().__init__(message)
        allow = ", ".join(allowed_methods)
        self.headers = {"Allow": allow}
class ServerError(SanicException):
    """
    **Status**: 500 Internal Server Error

    Not quiet: 500s are logged with a traceback by default.
    """
    status_code = 500
class ServiceUnavailable(SanicException):
    """
    **Status**: 503 Service Unavailable

    The server is currently unavailable (because it is overloaded or
    down for maintenance). Generally, this is a temporary state.
    """
    status_code = 503
    quiet = True
class URLBuildError(ServerError):
    """
    **Status**: 500 Internal Server Error

    Raised when ``url_for`` cannot build a URL from the given arguments.
    """
    status_code = 500
class FileNotFound(NotFound):
    """
    **Status**: 404 Not Found

    Raised when a static file cannot be located; keeps the looked-up
    filesystem path and the requested URL for error reporting.
    """

    def __init__(self, message, path, relative_url):
        self.path = path
        self.relative_url = relative_url
        super().__init__(message)
class RequestTimeout(SanicException):
    """
    **Status**: 408 Request Timeout

    The Web server (running the Web site) thinks that there has been too
    long an interval of time between 1) the establishment of an IP
    connection (socket) between the client and the server and
    2) the receipt of any data on that socket, so the server has dropped
    the connection. The socket connection has actually been lost - the Web
    server has 'timed out' on that particular socket connection.
    """
    status_code = 408
    quiet = True
class PayloadTooLarge(SanicException):
    """
    **Status**: 413 Payload Too Large

    The request body exceeds the configured size limit.
    """
    status_code = 413
    quiet = True
class HeaderNotFound(InvalidUsage):
    """
    **Status**: 400 Bad Request

    A required request header was missing.
    """
    status_code = 400
    quiet = True
class ContentRangeError(SanicException):
    """
    **Status**: 416 Range Not Satisfiable

    Carries a ``Content-Range`` header advertising the resource's total size.
    """

    status_code = 416
    quiet = True

    def __init__(self, message, content_range):
        super().__init__(message)
        total = content_range.total
        self.headers = {"Content-Range": f"bytes */{total}"}
class HeaderExpectationFailed(SanicException):
    """
    **Status**: 417 Expectation Failed

    The server cannot meet the requirements of the Expect request header.
    """
    status_code = 417
    quiet = True
class Forbidden(SanicException):
    """
    **Status**: 403 Forbidden

    The client is authenticated but not allowed to access the resource.
    """
    status_code = 403
    quiet = True
class InvalidRangeType(ContentRangeError):
    """
    **Status**: 416 Range Not Satisfiable

    The Range header used a unit the server does not understand.
    """
    status_code = 416
    quiet = True
class PyFileError(Exception):
    """Raised when a Python config file cannot be executed."""

    def __init__(self, file):
        # BUG FIX: the original passed the format string and the filename as
        # two separate Exception args, so the '%s' was never interpolated
        # and str(e) showed the raw tuple.
        super().__init__("could not execute config file %s" % file)
        # Keep the offending filename available for programmatic access.
        self.file = file
class Unauthorized(SanicException):
    """
    **Status**: 401 Unauthorized

    :param message: Message describing the exception.
    :param status_code: HTTP Status code.
    :param scheme: Name of the authentication scheme to be used.
    When present, kwargs is used to complete the WWW-Authentication header.

    Examples::
        # With a Basic auth-scheme, realm MUST be present:
        raise Unauthorized("Auth required.",
                           scheme="Basic",
                           realm="Restricted Area")
        # With a Digest auth-scheme, things are a bit more complicated:
        raise Unauthorized("Auth required.",
                           scheme="Digest",
                           realm="Restricted Area",
                           qop="auth, auth-int",
                           algorithm="MD5",
                           nonce="abcdef",
                           opaque="zyxwvu")
        # With a Bearer auth-scheme, realm is optional so you can write:
        raise Unauthorized("Auth required.", scheme="Bearer")
        # or, if you want to specify the realm:
        raise Unauthorized("Auth required.",
                           scheme="Bearer",
                           realm="Restricted Area")
    """

    status_code = 401
    quiet = True

    def __init__(self, message, status_code=None, scheme=None, **kwargs):
        super().__init__(message, status_code)
        # if auth-scheme is specified, set "WWW-Authenticate" header
        if scheme is not None:
            challenge = ", ".join(
                f'{k!s}="{v!s}"' for k, v in kwargs.items()
            )
            self.headers = {
                "WWW-Authenticate": f"{scheme} {challenge}".rstrip()
            }
class LoadFileException(SanicException):
    """Raised when a file intended to be loaded into the application fails to load."""
    pass
class InvalidSignal(SanicException):
    """Raised when a signal is dispatched or registered with an invalid name."""
    pass
def abort(status_code: int, message: Optional[Union[str, bytes]] = None):
    """
    Raise an exception based on SanicException. Returns the HTTP response
    message appropriate for the given status code, unless provided.

    :param status_code: The HTTP status code to return.
    :param message: The HTTP response body. Defaults to the messages in
        STATUS_CODES from sanic.helpers for the given status code.
    """
    import warnings

    # FIX: emit a proper DeprecationWarning (the original used the default
    # UserWarning category), pointed at the caller via stacklevel.
    warnings.warn(
        "sanic.exceptions.abort has been marked as deprecated, and will be "
        "removed in release 21.12.\n To migrate your code, simply replace "
        "abort(status_code, msg) with raise SanicException(msg, status_code), "
        "or even better, raise an appropriate SanicException subclass.",
        DeprecationWarning,
        stacklevel=2,
    )
    raise SanicException(message=message, status_code=status_code)
| |
# Copyright 2020-present Kensho Technologies, LLC.
from abc import ABCMeta, abstractmethod
from typing import AbstractSet, Any, Collection, Generic, Iterable, Mapping, Optional, Tuple
from ..compiler.metadata import FilterInfo
from ..typedefs import Literal, TypedDict
from .data_context import DataContext, DataToken
# Direction of an edge relative to the current vertex: "in" or "out".
EdgeDirection = Literal["in", "out"]
EdgeInfo = Tuple[EdgeDirection, str]  # direction + edge name
# TODO(predrag): Figure out a better type here. We need to balance between finding something
#                easy and lightweight, and letting the user know about things like:
#                optional edges, recursive edges, used fields/filters at the neighbor, etc.
#                Will probably punt on this until the API is stabilized, since defining something
#                here is not a breaking change.
NeighborHint = Any
class InterpreterHints(TypedDict):
    """Describe all known hint types.

    Values of this type are intended to be used as "**hints" syntax in adapter calls.
    """
    runtime_arg_hints: Mapping[str, Any]  # the runtime arguments passed for this query
    used_property_hints: AbstractSet[str]  # the names of all property fields used within this scope
    filter_hints: Collection[FilterInfo]  # info on all filters used within this scope
    neighbor_hints: Collection[Tuple[EdgeInfo, NeighborHint]]  # info on all neighbors of this scope
class InterpreterAdapter(Generic[DataToken], metaclass=ABCMeta):
"""Base class defining the API for schema-aware interpreter functionality over some schema.
This ABC is the abstraction through which the rest of the interpreter is schema-agnostic:
the rest of the interpreter code simply takes an instance of InterpreterAdapter and performs
all schema-aware operations through its simple, four-method API.
## The DataToken type parameter
This class is generic on an implementer-chosen DataToken type, which to the rest of the library
represents an opaque reference to the data contained by a particular vertex in the data set
described by your chosen schema. For example, if building a subclass of InterpreterAdapter
called MyAdapter with dict as the DataToken type, MyAdapter should be defined as follows:
class MyAdapter(InterpreterAdapter[dict]):
...
Here are a few common examples of DataToken types in practice:
- a dict containing the type name of the vertex and the values of all its properties;
- a dataclass containing the type name of the vertex, and a collection name and primary key
that can be used to retrieve its property values from a database, or
- an instance of a custom class which has *some* of the values of the vertex properties, and
has sufficient information to look up the rest of them if they are ever requested.
The best choice of DataToken type is dependent on the specific use case, e.g. whether the data
is already available in Python memory, or is on a local disk, or is a network hop away.
Implementers are free to choose any DataToken type and the interpreter code will happily use it.
However, certain debugging and testing tools provided by this library will work best
when DataToken is a deep-copyable type that implements equality beyond
a simple referential equality check.
## The InterpreterAdapter API
The methods in the InterpreterAdapter API are all designed to support generator-style operation,
where data is produced and consumed only when required. Here is a high-level description of
the methods in the InterpreterAdapter API:
- get_tokens_of_type() produces an iterable of DataTokens of the type specified by its argument.
The calling function will wrap the DataTokens into a bookkeeping object called a DataContext,
where a particular token is currently active and specified in the "current_token" attribute.
- For an iterable of such DataContexts, project_property() can be used to get the value
of one of the properties on the vertex type represented by the currently active DataToken
in each DataContext; project_property() therefore returns an iterable of
tuples (data_context, value).
- project_neighbors() is similar: for an iterable of DataContexts and a specific edge name,
it returns an iterable (data_context, iterable_of_neighbor_tokens) where
iterable_of_neighbor_tokens yields a DataToken for each vertex that can be reached by
following the specified edge from data_context's vertex.
- can_coerce_to_type() is used to check whether a DataToken corresponding to one vertex type
can be safely converted into one representing a different vertex type. Given an iterable of
DataContexts and the name of the type to which the conversion is attempted, it produces
an iterable of tuples (data_context, can_coerce), where can_coerce is a boolean.
## Performance and optimization opportunities
The design of the API and its generator-style operation enable a variety of optimizations.
Many optimizations are applied automatically, and additional ones can be implemented with
minimal additional work. A few simple examples:
- Interpreters perform lazy evaluation by default: if exactly 3 query results are requested,
then only the minimal data necessary for *exactly 3* results' worth of outputs is loaded.
- When computing a particular result, data loading for output fields is deferred
until *after* all filtering operations have been completed, to minimize data loads.
- Data caching is easy to implement within this API -- simply have
your API function's implementation consult a cache before performing the requested operation.
- Batch-loading of data can be performed by simply advancing the input generator multiple times,
then operating on an entire batch of input data before producing corresponding outputs:
def project_property(
self,
data_contexts: Iterable[DataContext[DataToken]],
current_type_name: str,
field_name: str,
**hints: Any
) -> Iterable[Tuple[DataContext[DataToken], Any]]:
for data_context_batch in funcy.chunks(30, data_contexts):
# Data for 30 entries is now in data_context_batch, operate on it in bulk.
results_batch = compute_results_for_batch(
data_context_batch, current_type_name, field_name
)
yield from results_batch
Additionally, each of the four methods in the API takes several kwargs whose names
end with the suffix "_hints", in addition to the catch-all "**hints: Any" argument. These
provide each function with information about how the data it is currently processing will
be used in subsequent operations, and can therefore enable additional interesting optimizations.
Use of these hints is optional (the interpreter always assumes that the hints weren't used),
so subclasses of InterpreterAdapter may even safely ignore these kwargs entirely -- for example,
if the "runtime_arg_hints" kwarg is omitted in the method definition, at call time its value
will go into the catch-all "**hints" argument instead.
The set of hints (and the information each hint provides) could grow in the future. Currently,
the following hints are offered:
- runtime_arg_hints: the names and values of any runtime arguments provided to the query
for use in filtering operations (e.g. "$arg_name"); an empty mapping in queries
with no runtime arguments.
- used_property_hints: the property names in the current scope that are used by the query,
e.g. in a filter or as an output. Within project_neighbors(), the current scope is the
neighboring vertex; in the remaining 3 methods the current scope is the current vertex.
- filter_hints: information about the filters applied within the current scope,
such as "which filtering operation is being performed?" and "with which arguments?"
Within project_neighbors(), the current scope is the neighboring vertex; in
the remaining 3 methods the current scope is the current vertex.
- neighbor_hints: information about the edges originating from the current scope that
the query will eventually need to expand. Within project_neighbors(), the current scope is
the neighboring vertex; in the remaining 3 methods the current scope is the current vertex.
More details on these hints, and suggestions for their use, can be found in the methods'
docstrings, available below.
"""
    @abstractmethod
    def get_tokens_of_type(
        self,
        type_name: str,
        *,
        runtime_arg_hints: Optional[Mapping[str, Any]] = None,
        used_property_hints: Optional[AbstractSet[str]] = None,
        filter_hints: Optional[Collection[FilterInfo]] = None,
        neighbor_hints: Optional[Collection[Tuple[EdgeInfo, NeighborHint]]] = None,
        **hints: Any,
    ) -> Iterable[DataToken]:
        """Produce an iterable of tokens for the specified type name.

        This function is used by the interpreter library to get the initial data with which
        the process of query execution begins.

        Consider the following example schema:
            schema {
                query: RootSchemaQuery
            }

            < ... some default GraphQL compiler directives and scalar type definitions here ... >

            type Foo {
                < ... some fields here ... >
            }

            < ... perhaps other type definitions here ... >

            type RootSchemaQuery {
                # This is the root query type for the schema, as defined at the top of the schema.
                Foo: [Foo]
            }

        Per the GraphQL specification, since the definition of RootSchemaQuery only contains the
        type named Foo, queries must start by querying for Foo in order to be valid for the schema:
            {
                Foo {
                    < ... stuff here ... >
                }
            }

        To compute the results for such a query, the interpreter would call get_tokens_of_type()
        with "Foo" as the type_name value. As get_tokens_of_type() yields tokens,
        the interpreter uses those tokens to perform the rest of the query via
        the remaining interpreter API methods.

        get_tokens_of_type() is guaranteed to be called *exactly once* during the evaluation of
        any interpreted query. However, due to the generator-style operation of the interpreter,
        the call to get_tokens_of_type() is *not* guaranteed to be the first call across the four
        methods that comprise this API -- one or more calls to the other methods may precede it.

        Args:
            type_name: name of the vertex type for which to yield tokens. Guaranteed to be:
                - the name of a type defined in the schema being queried, and specifically
                - one of the types defined in the schema's root query type:
                  http://spec.graphql.org/June2018/#sec-Root-Operation-Types
            runtime_arg_hints: names and values of any runtime arguments provided to the query
                for use in filtering operations (e.g. "$arg_name").
            used_property_hints: the property names of the requested vertices that
                are going to be used in a subsequent filtering or output step.
            filter_hints: information about any filters applied to the requested vertices,
                such as "which filtering operations are being performed?"
                and "with which arguments?"
            neighbor_hints: information about the edges originating from the requested vertices
                that the query will eventually need to expand.
            **hints: catch-all kwarg field making the function's signature forward-compatible with
                future revisions of this library that add more hints.

        Yields:
            DataTokens corresponding to vertices of the specified type. The information supplied
            via hints may, but is not required to, be applied to the returned DataToken objects.
            For example, this function is allowed to yield a DataToken that will be filtered out
            in a subsequent query step, even though the filter_hints argument (or other hints)
            notified this function of that impending outcome.
        """
    @abstractmethod
    def project_property(
        self,
        data_contexts: Iterable[DataContext[DataToken]],
        current_type_name: str,
        field_name: str,
        *,
        runtime_arg_hints: Optional[Mapping[str, Any]] = None,
        used_property_hints: Optional[AbstractSet[str]] = None,
        filter_hints: Optional[Collection[FilterInfo]] = None,
        neighbor_hints: Optional[Collection[Tuple[EdgeInfo, NeighborHint]]] = None,
        **hints: Any,
    ) -> Iterable[Tuple[DataContext[DataToken], Any]]:
        """Produce the values for a given property for each of an iterable of input DataTokens.

        In situations such as outputting property values or applying filters to properties,
        the interpreter needs to get the value of some property field for a series of DataTokens.

        For example, consider the following query:
            {
                Foo {
                    bar @output(out_name: "bar_value")
                }
            }

        Once the interpreter has used the get_tokens_of_type() function to obtain
        an iterable of DataTokens for the Foo type, it will automatically wrap each of them in
        a "bookkeeping" object called DataContext. These DataContext objects allow
        the interpreter to keep track of "which data came from where"; only the DataToken value
        bound to each current_token attribute is relevant to the InterpreterAdapter API.

        Having obtained an iterable of DataTokens and converted it to an iterable of DataContexts,
        the interpreter needs to get the value of the "bar" property for the tokens bound to
        the contexts. To do so, the interpreter calls project_property() with the iterable
        of DataContexts, setting current_type_name = "Foo" and field_name = "bar", requesting
        the "bar" property's value for each DataContext with its corresponding current_token.
        If the DataContext's current_token attribute is set to None (which may happen
        when @optional edges are used), the property's value is considered to be None.

        A simple example implementation is as follows:
            def project_property(
                self,
                data_contexts: Iterable[DataContext[DataToken]],
                current_type_name: str,
                field_name: str,
                **hints: Any,
            ) -> Iterable[Tuple[DataContext[DataToken], Any]]:
                for data_context in data_contexts:
                    current_token = data_context.current_token
                    property_value: Any
                    if current_token is None:
                        # Evaluating an @optional scope where the optional edge didn't exist.
                        # There is no value for the named property here.
                        property_value = None
                    else:
                        if field_name == "__typename":
                            # The query is requesting the runtime type of the current vertex.
                            # If current_type_name is an interface type, the runtime type of
                            # the current vertex may either be that interface type or
                            # a type that implements that interface. More info on "__typename"
                            # can be found at https://graphql.org/learn/queries/#meta-fields
                            property_value = < load the runtime type of the current_token vertex >
                        else:
                            property_value = (
                                < load the value of the field_name property for current_token >
                            )

                    # Remember to always yield the DataContext alongside the produced value.
                    yield data_context, property_value

        Args:
            data_contexts: iterable of DataContext objects which specify the DataTokens whose
                property data needs to be loaded
            current_type_name: name of the vertex type whose property needs to be loaded. Guaranteed
                to be the name of a type defined in the schema being queried.
                (current_type_name names the concrete type of DataToken contained in
                the DataContext containers yielded by the data_context iterator.)
            field_name: name of the property whose data needs to be loaded. Guaranteed to refer
                either to a property that is defined in the supplied current_type_name
                in the schema, or to the "__typename" meta field that is valid for all
                GraphQL types and holds the type name of the current vertex. This type name
                may be different from the value of current_type_name e.g. when
                current_type_name refers to an interface type and "__typename" refers to
                a type that implements that interface. More information on "__typename" may
                be found in the GraphQL docs: https://graphql.org/learn/queries/#meta-fields
            runtime_arg_hints: names and values of any runtime arguments provided to the query
                for use in filtering operations (e.g. "$arg_name").
            used_property_hints: the property names of the vertices being processed that
                are going to be used in a subsequent filtering or output step.
            filter_hints: information about any filters applied to the vertices being processed,
                such as "which filtering operations are being performed?"
                and "with which arguments?"
            neighbor_hints: information about the edges of the vertices being processed
                that the query will eventually need to expand.
            **hints: catch-all kwarg field making the function's signature forward-compatible with
                future revisions of this library that add more hints.

        Yields:
            tuples (data_context, property_value), providing the value of the requested property
            together with the DataContext corresponding to that value. The yielded DataContext
            values must be yielded in the same order as they were received via the function's
            data_contexts argument.
        """
    @abstractmethod
    def project_neighbors(
        self,
        data_contexts: Iterable[DataContext[DataToken]],
        current_type_name: str,
        edge_info: EdgeInfo,
        *,
        runtime_arg_hints: Optional[Mapping[str, Any]] = None,
        used_property_hints: Optional[AbstractSet[str]] = None,
        filter_hints: Optional[Collection[FilterInfo]] = None,
        neighbor_hints: Optional[Collection[Tuple[EdgeInfo, NeighborHint]]] = None,
        **hints: Any,
    ) -> Iterable[Tuple[DataContext[DataToken], Iterable[DataToken]]]:
        """Produce the neighbors along a given edge for each of an iterable of input DataTokens.

        To support traversing edges, as well as directives such as @optional and @recurse,
        the interpreter needs to get the neighboring vertices along a particular edge for
        a series of DataTokens.

        For example, consider the following query:
            {
                Foo {
                    out_Foo_Bar {
                        < ... some fields here ... >
                    }
                }
            }

        Once the interpreter has used the get_tokens_of_type() function to obtain
        an iterable of DataTokens for the Foo type, it will automatically wrap each of them in
        a "bookkeeping" object called DataContext. These DataContext objects allow
        the interpreter to keep track of "which data came from where"; only the DataToken value
        bound to each current_token attribute is relevant to the InterpreterAdapter API.

        Having obtained an iterable of DataTokens and converted it to an iterable of DataContexts,
        the interpreter needs to find the neighbors along the outbound Foo_Bar edge for each of
        those DataTokens. To do so, the interpreter calls project_neighbors() with the iterable of
        DataContexts, setting current_type_name = "Foo" and edge_info = ("out", "Foo_Bar"). This
        function call requests an iterable of DataTokens representing the neighboring vertices for
        each current_token contained in a DataContext. If the DataContext's current_token
        attribute is set to None (which may happen when @optional edges are used), an empty
        iterable of neighboring DataTokens should be returned.

        A simple example implementation is as follows:
            def project_neighbors(
                self,
                data_contexts: Iterable[DataContext[DataToken]],
                current_type_name: str,
                edge_info: EdgeInfo,
                *,
                runtime_arg_hints: Optional[Mapping[str, Any]] = None,
                used_property_hints: Optional[AbstractSet[str]] = None,
                filter_hints: Optional[Collection[FilterInfo]] = None,
                neighbor_hints: Optional[Collection[Tuple[EdgeInfo, NeighborHint]]] = None,
                **hints: Any,
            ) -> Iterable[Tuple[DataContext[DataToken], Iterable[DataToken]]]:
                for data_context in data_contexts:
                    current_token = data_context.current_token
                    neighbors: Iterable[DataToken]
                    if current_token is None:
                        # Evaluating an @optional scope where the optional edge didn't exist.
                        # There are no neighbors here.
                        neighbors = []
                    else:
                        neighbors = _your_function_that_gets_neighbors_for_a_given_token(
                            current_type_name, edge_info, current_token
                        )

                    # Remember to always yield the DataContext alongside the produced value.
                    yield data_context, neighbors

        ## Common bug to avoid in your implementation

        In the previous code example, note that we called a module-scoped function,
        `_your_function_that_gets_neighbors_for_a_given_token`, instead of one defined in the scope
        of `project_neighbors`. Because Python evaluates references to variables in outer scopes
        at the time a function or generator is invoked -- not at the time it's defined -- it's very
        easy to introduce subtle race conditions when defining generator factories in a nested
        scope.

        Because generators may be evaluated in arbitrary order, these bugs can appear only
        intermittently and can be very difficult to troubleshoot. Always defining generator
        factories in the module scope is one reliable way to avoid this problem.

        In this example code, we use a for-loop to yield several generators from a generator
        factory. Notice that we don't pass any arguments to the generator factory -- the values
        its generators yield come from its enclosing scope.

        >>> def yield_generators():
        ...     for target in range(1, 4):
        ...         def _generator_factory():
        ...             while True:
        ...                 # refers to `target` in the enclosing scope
        ...                 yield target
        ...         yield _generator_factory()
        ...
        >>> gens = yield_generators()
        >>> one = next(gens)
        >>> next(one)
        1
        >>> two = next(gens)
        >>> next(two)
        2
        >>> next(one)  # We expect 1, but get 2
        2
        >>> three = next(gens)
        >>> next(three)
        3
        >>> next(two)  # We expect 2, but get 3
        3
        >>> next(one)  # We expect 1, got 2, and now get 3
        3

        Although we have three distinct generators, they're all yielding the same `target` from
        `yield_generators`'s scope, which is also the same `target` that the for-loop advances with
        each iteration.

        If we define `_generator_factory` in the scope of the module, then we can't refer
        inadvertently to shared state in an enclosing scope, which saves us from this bug.

        >>> def _generator_factory(target):
        ...     while True:
        ...         # refers to the argument `target`, which exists
        ...         # only in the local scope
        ...         yield target
        ...
        >>> def yield_generators():
        ...     for target in range(1, 4):
        ...         yield _generator_factory(target)
        ...
        >>> gens = yield_generators()
        >>> one = next(gens)
        >>> next(one)
        1
        >>> two = next(gens)
        >>> next(two)
        2
        >>> next(one)
        1
        >>> three = next(gens)
        >>> next(three)
        3
        >>> next(two)
        2
        >>> next(one)
        1

        ## Hints supplied to this function refer to neighboring vertices

        Hint kwargs in this function, such as used_property_hints, filter_hints, and
        neighbor_hints, describe the desired structure of the *neighboring* vertices that this
        function produces (as opposed to the vertices supplied via the data_contexts argument).

        For example, consider the following query:
            {
                Foo {
                    out_Foo_Bar {
                        name @output(out_name: "name")
                    }
                }
            }

        To traverse the out_Foo_Bar edge, project_neighbors() is called with
        used_property_hints=frozenset({"name"}) and data_contexts=<Iterable of DataContexts
        pointing to Foo vertices>. This is because used_property_hints correspond to
        neighboring vertices, and the neighboring Bar vertices (along the outbound Foo_Bar edge)
        are being queried for their "name" property.

        Args:
            data_contexts: iterable of DataContext objects which specify the DataTokens whose
                neighboring DataTokens need to be loaded.
            current_type_name: name of the vertex type whose neighbors need to be loaded. Guaranteed
                to be the name of a type defined in the schema being queried.
            edge_info: direction and name of the edge along which neighboring vertices need to be
                loaded. For example, in the query example above, this argument would be set
                to ("out", "Foo_Bar").
            runtime_arg_hints: names and values of any runtime arguments provided to the query
                for use in filtering operations (e.g. "$arg_name").
            used_property_hints: property names of the neighboring vertices being loaded that
                are going to be used in a subsequent filtering or output step.
            filter_hints: information about any filters applied to the neighboring vertices being
                loaded, such as "which filtering operations are being performed?"
                and "with which arguments?"
            neighbor_hints: information about the edges of the neighboring vertices being loaded
                that the query will eventually need to expand.
            **hints: catch-all kwarg field making the function's signature forward-compatible with
                future revisions of this library that add more hints.

        Yields:
            tuples (data_context, iterable_of_neighbor_tokens), providing the tokens of
            the neighboring vertices together with the DataContext corresponding to those neighbors.
            The yielded DataContext values must be yielded in the same order as they were received
            via the function's data_contexts argument.
        """
    @abstractmethod
    def can_coerce_to_type(
        self,
        data_contexts: Iterable[DataContext[DataToken]],
        current_type_name: str,
        coerce_to_type_name: str,
        *,
        runtime_arg_hints: Optional[Mapping[str, Any]] = None,
        used_property_hints: Optional[AbstractSet[str]] = None,
        filter_hints: Optional[Collection[FilterInfo]] = None,
        neighbor_hints: Optional[Collection[Tuple[EdgeInfo, NeighborHint]]] = None,
        **hints: Any,
    ) -> Iterable[Tuple[DataContext[DataToken], bool]]:
        """Determine if each of an iterable of input DataTokens can be coerced to another type.

        Consider a query like the following:
            {
                Foo {
                    out_Foo_Bar {
                        ... on BarImpl {
                            < ... some fields here ... >
                        }
                    }
                }
            }

        Assume that this query is written against a schema that contains the following definitions:
            type Foo {
                < ... some fields here ... >
                out_Foo_Bar: [Bar]
            }

            interface Bar {
                < ... some fields here ... >
            }

            type BarImpl implements Bar {
                < ... some fields here ... >
            }

        When resolving the out_Foo_Bar edge in the query using project_neighbors(), the interpreter
        receives DataTokens that (per the schema) are instances of the Bar interface type. However,
        the query's subsequent "... on BarImpl" type coercion clause requires that the interpreter
        discard any neighboring vertices that are not instances of BarImpl, a subtype of
        the Bar interface type.

        The interpreter uses can_coerce_to_type() for this purpose: it calls this function with
        an iterable of DataContexts, with current_type_name set to "Bar" (the schema-implied type
        of the query's scope) and with coerce_to_type_name set to "BarImpl" (the type to which
        coercion is being attempted). For each DataContext in the input iterable, this function
        yields a tuple containing the context itself and a bool set to True if the coercion
        to the new type could be completed.

        A simple example implementation is as follows:
            def can_coerce_to_type(
                self,
                data_contexts: Iterable[DataContext[DataToken]],
                current_type_name: str,
                coerce_to_type_name: str,
                *,
                runtime_arg_hints: Optional[Mapping[str, Any]] = None,
                used_property_hints: Optional[AbstractSet[str]] = None,
                filter_hints: Optional[Collection[FilterInfo]] = None,
                neighbor_hints: Optional[Collection[Tuple[EdgeInfo, NeighborHint]]] = None,
                **hints: Any,
            ) -> Iterable[Tuple[DataContext[DataToken], bool]]:
                for data_context in data_contexts:
                    current_token = data_context.current_token
                    can_coerce: bool
                    if current_token is None:
                        # Evaluating an @optional scope where the optional edge didn't exist.
                        # We cannot coerce something that doesn't exist.
                        can_coerce = False
                    else:
                        can_coerce = (
                            < check whether current_token represents a vertex
                              of type coerce_to_type_name >
                        )

                    # Remember to always yield the DataContext alongside the bool result.
                    yield data_context, can_coerce

        Args:
            data_contexts: iterable of DataContext objects which specify the DataTokens that are
                being coerced to a new type
            current_type_name: name of the vertex type from which the vertices are being coerced.
                Guaranteed to be the name of an interface or union type defined
                in the schema being queried.
            coerce_to_type_name: name of the vertex type to which the vertices are being coerced.
                Guaranteed to be the name of a type defined in the schema being
                queried. If current_type_name refers to an interface type, then
                coerce_to_type_name is guaranteed to refer to a type that
                implements that interface. If current_type_name refers to a union
                type, then coerce_to_type_name is guaranteed to refer to a type
                that is a member of that union type.
            runtime_arg_hints: names and values of any runtime arguments provided to the query
                for use in filtering operations (e.g. "$arg_name").
            used_property_hints: the property names of the vertices being processed that
                are going to be used in a subsequent filtering or output step.
            filter_hints: information about any filters applied to the vertices being processed,
                such as "which filtering operations are being performed?"
                and "with which arguments?"
            neighbor_hints: information about the edges of the vertices being processed
                that the query will eventually need to expand.
            **hints: catch-all kwarg field making the function's signature forward-compatible with
                future revisions of this library that add more hints.

        Yields:
            tuples (data_context, can_coerce), containing a DataContext and a corresponding boolean
            indicating whether the DataContext's current_token can be coerced to the specified type.
            The yielded DataContext values must be yielded in the same order as they were received
            via the function's data_contexts argument.
        """
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccessReviewScheduleDefinitionsOperations:
"""AccessReviewScheduleDefinitionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.authorization.v2021_03_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list(
        self,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.AccessReviewScheduleDefinitionListResult"]:
        """Get access review schedule definitions.

        :param filter: The filter to apply on the operation. Other than standard filters, one custom
         filter option is supported : 'assignedToMeToReview()'. When one specified
         $filter=assignedToMeToReview(), only items that are assigned to the calling user to review are
         returned.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AccessReviewScheduleDefinitionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2021_03_01_preview.models.AccessReviewScheduleDefinitionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AccessReviewScheduleDefinitionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-03-01-preview"
        accept = "application/json"
        # NOTE: the closures below capture `filter`, `cls`, `error_map`, and the remaining
        # `kwargs` from this call; AsyncItemPaged invokes them lazily, once per page.
        # Build the GET request for one page: the first page is built from the URL template
        # plus query parameters, while subsequent pages reuse the service-provided
        # next_link URL verbatim (it already embeds the query string).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if filter is not None:
                    # skip_quote: the filter value (e.g. "assignedToMeToReview()") must not be URL-quoted.
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page of results, returning (continuation token, async list of items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('AccessReviewScheduleDefinitionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Let the caller-provided `cls` transform the raw element list.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page through the pipeline and raise a typed error on a non-200 response.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/accessReviewScheduleDefinitions'}  # type: ignore
async def get_by_id(
self,
schedule_definition_id: str,
**kwargs: Any
) -> "_models.AccessReviewScheduleDefinition":
"""Get single access review definition.
:param schedule_definition_id: The id of the access review schedule definition.
:type schedule_definition_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessReviewScheduleDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2021_03_01_preview.models.AccessReviewScheduleDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessReviewScheduleDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01-preview"
accept = "application/json"
# Construct URL
url = self.get_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'scheduleDefinitionId': self._serialize.url("schedule_definition_id", schedule_definition_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessReviewScheduleDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}'} # type: ignore
async def delete_by_id(
self,
schedule_definition_id: str,
**kwargs: Any
) -> None:
"""Delete access review schedule definition.
:param schedule_definition_id: The id of the access review schedule definition.
:type schedule_definition_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01-preview"
accept = "application/json"
# Construct URL
url = self.delete_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'scheduleDefinitionId': self._serialize.url("schedule_definition_id", schedule_definition_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_by_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}'} # type: ignore
async def create_or_update_by_id(
self,
schedule_definition_id: str,
properties: "_models.AccessReviewScheduleDefinitionProperties",
**kwargs: Any
) -> "_models.AccessReviewScheduleDefinition":
"""Create or Update access review schedule definition.
:param schedule_definition_id: The id of the access review schedule definition.
:type schedule_definition_id: str
:param properties: Access review schedule definition properties.
:type properties: ~azure.mgmt.authorization.v2021_03_01_preview.models.AccessReviewScheduleDefinitionProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessReviewScheduleDefinition, or the result of cls(response)
:rtype: ~azure.mgmt.authorization.v2021_03_01_preview.models.AccessReviewScheduleDefinition
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessReviewScheduleDefinition"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'scheduleDefinitionId': self._serialize.url("schedule_definition_id", schedule_definition_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(properties, 'AccessReviewScheduleDefinitionProperties')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessReviewScheduleDefinition', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update_by_id.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}'} # type: ignore
async def stop(
self,
schedule_definition_id: str,
**kwargs: Any
) -> None:
"""Stop access review definition.
:param schedule_definition_id: The id of the access review schedule definition.
:type schedule_definition_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-01-preview"
accept = "application/json"
# Construct URL
url = self.stop.metadata['url'] # type: ignore
path_format_arguments = {
'scheduleDefinitionId': self._serialize.url("schedule_definition_id", schedule_definition_id, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
stop.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/accessReviewScheduleDefinitions/{scheduleDefinitionId}/stop'} # type: ignore
| |
import os
import itertools
from collections import Callable, OrderedDict
from functools import reduce
from django.forms.forms import (BaseForm, DeclarativeFieldsMetaclass,
NON_FIELD_ERRORS, pretty_name)
from django.forms.widgets import media_property
from django.core.exceptions import FieldError
from django.core.validators import EMPTY_VALUES
from django.forms.utils import ErrorList
from django.forms.formsets import BaseFormSet, formset_factory
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.text import capfirst, get_valid_filename
from mongoengine.fields import (ObjectIdField, ListField, ReferenceField,
FileField, MapField, EmbeddedDocumentField)
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from mongoengine.queryset import OperationError, Q
from mongoengine.queryset.base import BaseQuerySet
from mongoengine.connection import get_db, DEFAULT_CONNECTION_NAME
from mongoengine.base import NON_FIELD_ERRORS as MONGO_NON_FIELD_ERRORS
from gridfs import GridFS
from mongodbforms.documentoptions import DocumentMetaWrapper
from mongodbforms.util import with_metaclass, load_field_generator
# Module-level default field generator; load_field_generator() resolves the
# generator class configured for mongodbforms and is instantiated once here
# so every form can share it.
_fieldgenerator = load_field_generator()
def _get_unique_filename(name, db_alias=DEFAULT_CONNECTION_NAME,
                         collection_name='fs'):
    """Return a filename that does not yet exist in the GridFS collection.

    Starting from the sanitized ``name``, append ``_1``, ``_2``, ... before
    the extension until GridFS reports the name as free.

    :param name: desired filename (will be sanitized)
    :param db_alias: mongoengine connection alias to look the file up in
    :param collection_name: GridFS collection name
    :return: a filename unused in the target GridFS collection
    """
    fs = GridFS(get_db(db_alias), collection_name)
    file_root, file_ext = os.path.splitext(get_valid_filename(name))
    count = itertools.count(1)
    while fs.exists(filename=name):
        # file_ext includes the dot. (The previous single-argument
        # os.path.join() around this expression was a no-op and was removed.)
        name = "%s_%s%s" % (file_root, next(count), file_ext)
    return name
def _save_iterator_file(field, instance, uploaded_file, file_data=None):
    """
    Takes care of saving a file for a list field. Returns a Mongoengine
    fileproxy object or the file field.

    :param field: the MapField/ListField on the document whose inner field
        is a FileField (``field.field`` is the FileField itself)
    :param instance: the document instance the file belongs to
    :param uploaded_file: the uploaded file object to store
    :param file_data: an existing file proxy to overwrite, or None to create
        a fresh proxy via the inner field
    """
    # for a new file we need a new proxy object
    if file_data is None:
        file_data = field.field.get_proxy_obj(key=field.name,
                                              instance=instance)

    # Make sure the proxy is bound to this instance/field before writing.
    if file_data.instance is None:
        file_data.instance = instance
    if file_data.key is None:
        file_data.key = field.name

    # An existing grid_id means old content is stored; delete it first so the
    # new put() does not leave an orphaned GridFS file behind.
    if file_data.grid_id:
        file_data.delete()

    # Rewind in case the upload was already read (e.g. during validation).
    uploaded_file.seek(0)
    filename = _get_unique_filename(uploaded_file.name, field.field.db_alias,
                                    field.field.collection_name)

    file_data.put(uploaded_file, content_type=uploaded_file.content_type,
                  filename=filename)
    file_data.close()
    return file_data
def construct_instance(form, instance, fields=None, exclude=None):
    """
    Constructs and returns a document instance from the bound ``form``'s
    ``cleaned_data``, but does not save the returned instance to the
    database.

    :param form: a bound form whose ``cleaned_data`` supplies the values
    :param instance: a document instance, or a document class (a class is
        instantiated first)
    :param fields: optional whitelist of field names to copy
    :param exclude: optional blacklist of field names to skip
    :return: the (possibly newly created) document instance
    """
    cleaned_data = form.cleaned_data
    file_field_list = []

    # check wether object is instantiated
    if isinstance(instance, type):
        instance = instance()

    for f in instance._fields.values():
        # The ObjectId primary key is managed by mongoengine, never set here.
        if isinstance(f, ObjectIdField):
            continue
        # Only copy values the form actually cleaned.
        if f.name not in cleaned_data:
            continue
        if fields is not None and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue

        # Defer saving file-type fields until after the other fields, so a
        # callable upload_to can use the values from other fields.
        if isinstance(f, FileField) or \
                (isinstance(f, (MapField, ListField)) and
                 isinstance(f.field, FileField)):
            file_field_list.append(f)
        else:
            setattr(instance, f.name, cleaned_data.get(f.name))

    for f in file_field_list:
        if isinstance(f, MapField):
            # Map of uploads: save each value, keeping it under its key.
            map_field = getattr(instance, f.name)
            uploads = cleaned_data[f.name]
            for key, uploaded_file in uploads.items():
                if uploaded_file is None:
                    continue
                file_data = map_field.get(key, None)
                map_field[key] = _save_iterator_file(f, instance,
                                                     uploaded_file, file_data)
            setattr(instance, f.name, map_field)
        elif isinstance(f, ListField):
            # List of uploads: overwrite existing entries by index, append
            # when the stored list is shorter than the uploads.
            list_field = getattr(instance, f.name)
            uploads = cleaned_data[f.name]
            for i, uploaded_file in enumerate(uploads):
                if uploaded_file is None:
                    continue
                try:
                    file_data = list_field[i]
                except IndexError:
                    file_data = None
                file_obj = _save_iterator_file(f, instance,
                                               uploaded_file, file_data)
                try:
                    list_field[i] = file_obj
                except IndexError:
                    list_field.append(file_obj)
            setattr(instance, f.name, list_field)
        else:
            # Plain (single) FileField.
            field = getattr(instance, f.name)
            upload = cleaned_data[f.name]
            if upload is None:
                continue
            try:
                upload.file.seek(0)
                # delete first to get the names right
                if field.grid_id:
                    field.delete()
                filename = _get_unique_filename(upload.name, f.db_alias,
                                                f.collection_name)
                field.put(upload, content_type=upload.content_type,
                          filename=filename)
                setattr(instance, f.name, field)
            except AttributeError:
                # file was already uploaded and not changed during edit.
                # upload is already the gridfsproxy object we need.
                upload.get()
                setattr(instance, f.name, upload)
    return instance
def save_instance(form, instance, fields=None, fail_message='saved',
                  commit=True, exclude=None, construct=True):
    """Persist the bound ``form``'s cleaned_data on document ``instance``.

    When ``construct`` is True the instance is first rebuilt from the form's
    cleaned data via ``construct_instance``; when it is False the instance is
    assumed to be constructed already and only needs saving.

    The form must validate: a form with errors raises ``ValueError`` using
    ``fail_message`` to describe the attempted operation.  When ``commit`` is
    True and the instance exposes a ``save`` method (embedded documents do
    not), the instance is written to the database.  Returns ``instance``.
    """
    if construct:
        instance = construct_instance(form, instance, fields, exclude)

    if form.errors:
        raise ValueError("The %s could not be %s because the data didn't"
                         " validate." % (instance.__class__.__name__,
                                         fail_message))

    # Embedded documents have no save() of their own — they are persisted
    # through their parent — so only call save() when it exists.
    if commit and hasattr(instance, 'save'):
        instance.save()

    return instance
def document_to_dict(instance, fields=None, exclude=None):
    """Return ``instance``'s data as a dict usable as a form's ``initial``.

    ``fields`` is an optional list of field names; when given, only those
    fields appear in the result.  ``exclude`` is an optional list of field
    names removed from the result even when they are listed in ``fields``.
    """
    def _wanted(name):
        if fields and name not in fields:
            return False
        if exclude and name in exclude:
            return False
        return True

    return {
        f.name: getattr(instance, f.name, '')
        for f in instance._fields.values()
        if _wanted(f.name)
    }
def fields_for_document(document, fields=None, exclude=None, widgets=None,
                        formfield_callback=None,
                        field_generator=_fieldgenerator):
    """
    Returns an ``OrderedDict`` containing form fields for the given document.

    ``fields`` is an optional list of field names. If provided, only the named
    fields will be included in the returned fields.

    ``exclude`` is an optional list of field names. If provided, the named
    fields will be excluded from the returned fields, even if they are listed
    in the ``fields`` argument.
    """
    field_list = []
    if isinstance(field_generator, type):
        field_generator = field_generator()

    # BUGFIX: use the builtin callable() instead of
    # isinstance(..., collections.Callable) — the Callable alias was removed
    # from the ``collections`` module in Python 3.10 (it lives in
    # ``collections.abc``), and callable() is the idiomatic check anyway.
    if formfield_callback and not callable(formfield_callback):
        raise TypeError('formfield_callback must be a function or callable')

    for name in document._fields_ordered:
        f = document._fields.get(name)
        # The ObjectId primary key never becomes a form field.
        if isinstance(f, ObjectIdField):
            continue
        if fields and f.name not in fields:
            continue
        if exclude and f.name in exclude:
            continue
        if widgets and f.name in widgets:
            kwargs = {'widget': widgets[f.name]}
        else:
            kwargs = {}

        if formfield_callback:
            formfield = formfield_callback(f, **kwargs)
        else:
            formfield = field_generator.generate(f, **kwargs)

        if formfield:
            field_list.append((f.name, formfield))

    field_dict = OrderedDict(field_list)
    if fields:
        # Reorder to match the caller-specified ``fields`` list; unknown
        # names map to None so the metaclass can report them.
        field_dict = OrderedDict(
            [(f, field_dict.get(f)) for f in fields
             if ((not exclude) or (exclude and f not in exclude))]
        )
    return field_dict
class ModelFormOptions(object):
    """Carries the configuration declared on a form's inner ``Meta`` class."""

    def __init__(self, options=None):
        def opt(attr, default=None):
            return getattr(options, attr, default)

        # The document class can be declared as 'document =' or 'model ='.
        self.document = opt('document')
        if self.document is None:
            self.document = opt('model')
        self.model = self.document

        meta = getattr(self.document, '_meta', {})
        # Wrap a plain dict meta in a DocumentMetaWrapper so the form layer
        # can rely on a uniform meta interface.
        if self.document is not None and \
                not isinstance(meta, DocumentMetaWrapper):
            self.document._meta = DocumentMetaWrapper(self.document)

        self.fields = opt('fields')
        self.exclude = opt('exclude')
        self.widgets = opt('widgets')
        self.embedded_field = opt('embedded_field_name')
        self.formfield_generator = opt('formfield_generator', _fieldgenerator)
        self._dont_save = []
        self.labels = opt('labels')
        self.help_texts = opt('help_texts')
class DocumentFormMetaclass(DeclarativeFieldsMetaclass):
    """Metaclass for document-backed forms.

    Collects declaratively defined fields (via DeclarativeFieldsMetaclass)
    and, when the class's ``Meta`` names a document, generates form fields
    from the document's own fields and merges the two sets.
    """

    def __new__(cls, name, bases, attrs):
        formfield_callback = attrs.pop('formfield_callback', None)
        try:
            parents = [
                b for b in bases
                if issubclass(b, DocumentForm) or
                issubclass(b, EmbeddedDocumentForm)
            ]
        except NameError:
            # We are defining DocumentForm itself.
            parents = None
        new_class = super(DocumentFormMetaclass, cls).__new__(cls, name,
                                                              bases, attrs)
        # Base classes (DocumentForm/EmbeddedDocumentForm themselves) need no
        # field generation.
        if not parents:
            return new_class

        if 'media' not in attrs:
            # Derive a ``media`` property from the declared widgets/Media.
            new_class.media = media_property(new_class)

        opts = new_class._meta = ModelFormOptions(
            getattr(new_class, 'Meta', None)
        )
        if opts.document:
            formfield_generator = getattr(opts,
                                          'formfield_generator',
                                          _fieldgenerator)

            # If a model is defined, extract form fields from it.
            fields = fields_for_document(opts.document, opts.fields,
                                         opts.exclude, opts.widgets,
                                         formfield_callback,
                                         formfield_generator)
            # make sure opts.fields doesn't specify an invalid field
            none_document_fields = [k for k, v in fields.items() if not v]
            missing_fields = (set(none_document_fields) -
                              set(new_class.declared_fields.keys()))
            if missing_fields:
                message = 'Unknown field(s) (%s) specified for %s'
                message = message % (', '.join(missing_fields),
                                     opts.model.__name__)
                raise FieldError(message)
            # Override default model fields with any custom declared ones
            # (plus, include all the other declared fields).
            fields.update(new_class.declared_fields)
        else:
            fields = new_class.declared_fields
        new_class.base_fields = fields
        return new_class
class BaseDocumentForm(BaseForm):
    """Base class for forms bound to a mongoengine document.

    Mirrors django's ``BaseModelForm``: ``self.instance`` holds the document
    being edited, ``_post_clean`` copies the cleaned form data onto it and
    runs document-level validation, and ``save`` persists it.
    """

    # TODO: this is needed for django 1.10, check !
    use_required_attribute = True

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False, instance=None, use_required_attribute=None):
        # NOTE(review): ``use_required_attribute`` is accepted here but not
        # forwarded to BaseForm.__init__ below — confirm whether it should be.
        opts = self._meta
        if instance is None:
            if opts.document is None:
                raise ValueError('A document class must be provided.')
            # if we didn't get an instance, instantiate a new one
            self.instance = opts.document()
            object_data = {}
        else:
            self.instance = instance
            object_data = document_to_dict(instance, opts.fields, opts.exclude)
        # if initial was provided, it should override the values from instance
        if initial is not None:
            object_data.update(initial)
        # self._validate_unique will be set to True by BaseModelForm.clean().
        # It is False by default so overriding self.clean() and failing to call
        # super will stop validate_unique from being called.
        self._validate_unique = False
        super(BaseDocumentForm, self).__init__(data, files, auto_id, prefix,
                                               object_data, error_class,
                                               label_suffix, empty_permitted)

    def _update_errors(self, message_dict):
        """Merge a ``{field_name: [messages]}`` dict into ``self._errors``.

        Field-level errors also drop the offending value from
        ``cleaned_data``; ``NON_FIELD_ERRORS`` messages are appended to the
        form-wide error list.
        """
        for k, v in list(message_dict.items()):
            if k != NON_FIELD_ERRORS:
                self._errors.setdefault(k, self.error_class()).extend(v)
                # Remove the invalid data from the cleaned_data dict
                if k in self.cleaned_data:
                    del self.cleaned_data[k]
        if NON_FIELD_ERRORS in message_dict:
            messages = message_dict[NON_FIELD_ERRORS]
            self._errors.setdefault(NON_FIELD_ERRORS,
                                    self.error_class()).extend(messages)

    def _get_validation_exclusions(self):
        """
        For backwards-compatibility, several types of fields need to be
        excluded from model validation. See the following tickets for
        details: #12507, #12521, #12553

        Returns the list of document field names that should be skipped by
        document-level validation and unique checks.
        """
        exclude = []
        # Build up a list of fields that should be excluded from model field
        # validation and unique checks.
        for f in self.instance._fields.values():
            # Exclude fields that aren't on the form. The developer may be
            # adding these values to the model after form validation.
            if f.name not in self.fields:
                exclude.append(f.name)

            # Don't perform model validation on fields that were defined
            # manually on the form and excluded via the ModelForm's Meta
            # class. See #12901.
            elif self._meta.fields and f.name not in self._meta.fields:
                exclude.append(f.name)
            elif self._meta.exclude and f.name in self._meta.exclude:
                exclude.append(f.name)

            # Exclude fields that failed form validation. There's no need for
            # the model fields to validate them as well.
            elif f.name in list(self._errors.keys()):
                exclude.append(f.name)

            # Exclude empty fields that are not required by the form, if the
            # underlying model field is required. This keeps the model field
            # from raising a required error. Note: don't exclude the field from
            # validaton if the model field allows blanks. If it does, the blank
            # value may be included in a unique check, so cannot be excluded
            # from validation.
            else:
                field_value = self.cleaned_data.get(f.name, None)
                if not f.required and field_value in EMPTY_VALUES:
                    exclude.append(f.name)
        return exclude

    def clean(self):
        # Enables validate_unique() in _post_clean(); forms that override
        # clean() without calling super() thereby opt out of unique checks.
        self._validate_unique = True
        return self.cleaned_data

    def _post_clean(self):
        """Copy cleaned data onto the instance and run document validation."""
        opts = self._meta

        # Update the model instance with self.cleaned_data.
        self.instance = construct_instance(self, self.instance, opts.fields,
                                           opts.exclude)

        changed_fields = getattr(self.instance, '_changed_fields', [])

        exclude = self._get_validation_exclusions()
        try:
            for f in self.instance._fields.values():
                value = getattr(self.instance, f.name)
                if f.name not in exclude:
                    f.validate(value)
                elif value in EMPTY_VALUES and f.name not in changed_fields:
                    # mongoengine chokes on empty strings for fields
                    # that are not required. Clean them up here, though
                    # this is maybe not the right place :-)
                    setattr(self.instance, f.name, None)
                    # opts._dont_save.append(f.name)
        except ValidationError as e:
            # FIXME: f might be referenced before assignment
            err = {f.name: [e.message]}
            self._update_errors(err)

        # Call validate() on the document. Since mongoengine
        # does not provide an argument to specify which fields
        # should be excluded during validation, we replace
        # instance._fields_ordered with a version that does
        # not include excluded fields. The attribute gets
        # restored after validation.
        original_fields = self.instance._fields_ordered
        self.instance._fields_ordered = tuple(
            [f for f in original_fields if f not in exclude]
        )
        try:
            self.instance.validate()
        except ValidationError as e:
            if MONGO_NON_FIELD_ERRORS in e.errors:
                error = e.errors.get(MONGO_NON_FIELD_ERRORS)
            else:
                error = e.message
            self._update_errors({NON_FIELD_ERRORS: [error, ]})
        finally:
            self.instance._fields_ordered = original_fields

        # Validate uniqueness if needed.
        if self._validate_unique:
            self.validate_unique()

    def validate_unique(self):
        """
        Validates unique constrains on the document.
        unique_with is supported now.

        Returns the list of error dicts that were added to ``self._errors``.
        """
        errors = []
        exclude = self._get_validation_exclusions()
        for f in self.instance._fields.values():
            if f.unique and f.name not in exclude:
                filter_kwargs = {
                    f.name: getattr(self.instance, f.name),
                    'q_obj': None,
                }
                if f.unique_with:
                    for u_with in f.unique_with:
                        u_with_field = self.instance._fields[u_with]
                        u_with_attr = getattr(self.instance, u_with)
                        # handling ListField(ReferenceField()) sucks big time
                        # What we need to do is construct a Q object that
                        # queries for the pk of every list entry and only
                        # accepts lists with the same length as our list
                        if isinstance(u_with_field, ListField) and \
                                isinstance(u_with_field.field, ReferenceField):
                            q_list = [Q(**{u_with: k.pk}) for k in u_with_attr]
                            q = reduce(lambda x, y: x & y, q_list)
                            size_key = '%s__size' % u_with
                            q = q & Q(**{size_key: len(u_with_attr)})
                            # NOTE(review): the first time through,
                            # filter_kwargs['q_obj'] is None — confirm that
                            # mongoengine's Q.__and__ accepts that.
                            filter_kwargs['q_obj'] = q & filter_kwargs['q_obj']
                        else:
                            filter_kwargs[u_with] = u_with_attr
                qs = self.instance.__class__.objects.clone()
                qs = qs.no_dereference().filter(**filter_kwargs)

                # Exclude the current object from the query if we are editing
                # an instance (as opposed to creating a new one)
                if self.instance.pk is not None:
                    qs = qs.filter(pk__ne=self.instance.pk)

                if qs.count() > 0:
                    message = _("%s with this %s already exists.") % (
                        str(capfirst(self.instance._meta.verbose_name)),
                        str(pretty_name(f.name))
                    )
                    err_dict = {f.name: [message]}
                    self._update_errors(err_dict)
                    errors.append(err_dict)
        return errors

    def save(self, commit=True):
        """
        Saves this ``form``'s cleaned_data into model instance
        ``self.instance``.

        If commit=True, then the changes to ``instance`` will be saved to the
        database. Returns ``instance``.
        """
        # An embedded document has no pk attribute at all, hence the broad
        # fail_message fallback below.
        try:
            if self.instance.pk is None:
                fail_message = 'created'
            else:
                fail_message = 'changed'
        except (KeyError, AttributeError):
            fail_message = 'embedded document saved'
        obj = save_instance(self, self.instance, self._meta.fields,
                            fail_message, commit, construct=False)
        return obj
    save.alters_data = True
class DocumentForm(with_metaclass(DocumentFormMetaclass, BaseDocumentForm)):
    """Concrete document form: subclasses declare ``Meta.document`` and the
    metaclass generates the form fields from that document."""
    pass
def documentform_factory(document, form=DocumentForm, fields=None,
                         exclude=None, formfield_callback=None):
    """Build and return a ``DocumentForm`` subclass for ``document``.

    ``fields``/``exclude`` restrict the generated form fields and
    ``formfield_callback`` customizes field creation, exactly as on a
    hand-written form's ``Meta``.
    """
    # Collect the attributes the inner Meta class will carry.
    meta_attrs = {'document': document, 'model': document}
    if fields is not None:
        meta_attrs['fields'] = fields
    if exclude is not None:
        meta_attrs['exclude'] = exclude

    # If parent form class already has an inner Meta, the Meta we're
    # creating needs to inherit from the parent's inner meta.
    meta_bases = (form.Meta, object) if hasattr(form, 'Meta') else (object,)
    Meta = type('Meta', meta_bases, meta_attrs)

    # Give this new form class a reasonable name.
    doc_inst = document() if isinstance(document, type) else document
    class_name = doc_inst.__class__.__name__ + 'Form'

    # Class attributes for the new form class.
    form_class_attrs = {
        'Meta': Meta,
        'formfield_callback': formfield_callback,
    }
    return DocumentFormMetaclass(class_name, (form,), form_class_attrs)
class EmbeddedDocumentForm(with_metaclass(DocumentFormMetaclass,
                                          BaseDocumentForm)):
    """A form for an embedded document living on a parent document.

    ``Meta.embedded_field_name`` names the parent's field that holds the
    embedded document (either an ``EmbeddedDocumentField`` or a ``ListField``
    of embedded documents).
    """

    def __init__(self, parent_document, data=None, files=None, position=None,
                 *args, **kwargs):
        """
        :param parent_document: the document the embedded object lives on
        :param position: index of the instance inside a ListField; when
            omitted it is resolved by scanning the list for ``instance``
        """
        if self._meta.embedded_field is not None and \
                self._meta.embedded_field not in parent_document._fields:
            raise FieldError("Parent document must have field %s" %
                             self._meta.embedded_field)

        instance = kwargs.pop('instance', None)

        if isinstance(parent_document._fields.get(self._meta.embedded_field),
                      ListField):
            # if we received a list position of the instance and no instance
            # load the instance from the parent document and proceed as normal
            if instance is None and position is not None:
                instance = getattr(parent_document,
                                   self._meta.embedded_field)[position]

            # same as above only the other way around. Note: Mongoengine
            # defines equality as having the same data, so if you have 2
            # objects with the same data the first one will be edited. That
            # may or may not be the right one.
            if instance is not None and position is None:
                emb_list = getattr(parent_document, self._meta.embedded_field)
                position = next(
                    (i for i, obj in enumerate(emb_list) if obj == instance),
                    None
                )

        super(EmbeddedDocumentForm, self).__init__(data=data, files=files,
                                                   instance=instance, *args,
                                                   **kwargs)
        self.parent_document = parent_document
        self.position = position

    def save(self, commit=True):
        """If commit is True the embedded document is added to the parent
        document. Otherwise the parent_document is left untouched and the
        embedded is returned as usual.
        """
        if self.errors:
            # BUGFIX: the two literals previously concatenated to
            # "...the datadidn't validate." — a space was missing.
            raise ValueError("The %s could not be saved because the data "
                             "didn't validate." %
                             self.instance.__class__.__name__)

        if commit:
            field = self.parent_document._fields.get(self._meta.embedded_field)

            if isinstance(field, ListField) and self.position is None:
                # no position given, simply appending to ListField
                try:
                    self.parent_document.update(**{
                        "push__" + self._meta.embedded_field: self.instance
                    })
                # BUGFIX: narrowed from a bare ``except:`` so that
                # KeyboardInterrupt/SystemExit are no longer swallowed.
                except Exception:
                    raise OperationError("The %s could not be appended." %
                                         self.instance.__class__.__name__)
            elif isinstance(field, ListField) and self.position is not None:
                # updating ListField at given position
                try:
                    self.parent_document.update(**{
                        "__".join(("set", self._meta.embedded_field,
                                   str(self.position))): self.instance
                    })
                except Exception:
                    raise OperationError("The %s could not be updated at "
                                         "position %d." %
                                         (self.instance.__class__.__name__,
                                          self.position))
            else:
                # not a listfield on parent, treat as an embedded field
                setattr(self.parent_document, self._meta.embedded_field,
                        self.instance)
                self.parent_document.save()

        return self.instance
class BaseDocumentFormSet(BaseFormSet):
    """
    A ``FormSet`` for editing a queryset and/or adding new objects to it.
    """

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 queryset=None, **kwargs):
        # BUGFIX: the default used to be a shared mutable ``queryset=[]``;
        # ``None`` now acts as the sentinel for "no queryset given".
        if queryset is None:
            queryset = []
        # A single document is wrapped so the rest of the class can always
        # treat the queryset as a sequence.
        if not isinstance(queryset, (list, BaseQuerySet)):
            queryset = [queryset]
        self.queryset = queryset
        # for django 1.10
        self.new_objects = []
        self.changed_objects = []
        self.deleted_objects = []
        self.initial = self.construct_initial()
        defaults = {'data': data, 'files': files, 'auto_id': auto_id,
                    'prefix': prefix, 'initial': self.initial}
        defaults.update(kwargs)
        super(BaseDocumentFormSet, self).__init__(**defaults)

    def construct_initial(self):
        """Build the ``initial`` data list from the queryset's documents."""
        initial = []
        try:
            for d in self.get_queryset():
                initial.append(document_to_dict(d))
        except TypeError:
            # the queryset is not iterable; start with no initial data
            pass
        return initial

    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if not (self.data or self.files):
            return len(self.get_queryset())
        return super(BaseDocumentFormSet, self).initial_form_count()

    def get_queryset(self):
        qs = self.queryset or []
        return qs

    def save_object(self, form):
        """Build (but do not persist) the document for ``form``."""
        obj = form.save(commit=False)
        return obj

    def save(self, commit=True):
        """
        Saves model instances for every form, adding and changing instances
        as necessary, and returns the list of instances.
        """
        saved = []
        for form in self.forms:
            if not form.has_changed() and form not in self.initial_forms:
                continue
            obj = self.save_object(form)
            if form.cleaned_data.get("DELETE", False):
                try:
                    obj.delete()
                except AttributeError:
                    # if it has no delete method it is an embedded object. We
                    # just don't add to the list and it's gone. Cool huh?
                    pass
                # BUGFIX: a successfully deleted object previously fell
                # through, was save()d again and returned; skip it instead.
                continue
            if commit:
                obj.save()
            saved.append(obj)
        return saved

    def save_new_objects(self, commit=True):
        # for django 1.10
        # NOTE(review): relies on ``self.save_new`` which is not defined in
        # this class — presumably supplied by a subclass; confirm.
        self.new_objects = []
        for form in self.extra_forms:
            if not form.has_changed():
                continue
            # If someone has marked an add form for deletion, don't save the
            # object.
            if self.can_delete and self._should_delete_form(form):
                continue
            self.new_objects.append(self.save_new(form, commit=commit))
            if not commit:
                self.saved_forms.append(form)
        return self.new_objects

    def save_existing_objects(self, commit=True):
        # for django 1.10
        # NOTE(review): relies on ``self.save_existing`` which is not defined
        # in this class — presumably supplied by a subclass; confirm.
        self.changed_objects = []
        self.deleted_objects = []
        if not self.initial_forms:
            return []

        saved_instances = []
        forms_to_delete = self.deleted_forms
        for form in self.initial_forms:
            obj = form.instance
            if form in forms_to_delete:
                # If the pk is None, it means that the object can't be
                # deleted again. Possible reason for this is that the
                # object was already deleted from the DB. Refs #14877.
                if obj.pk is None:
                    continue
                self.deleted_objects.append(obj)
                if commit:
                    obj.delete()
            elif form.has_changed():
                self.changed_objects.append((obj, form.changed_data))
                saved_instances.append(self.save_existing(form, obj, commit=commit))
                if not commit:
                    self.saved_forms.append(form)
        return saved_instances

    def clean(self):
        self.validate_unique()

    def validate_unique(self):
        """Collect unique-check errors from every bound form; raise if any."""
        errors = []
        for form in self.forms:
            if not hasattr(form, 'cleaned_data'):
                continue
            errors += form.validate_unique()

        if errors:
            raise ValidationError(errors)

    def get_date_error_message(self, date_check):
        return ugettext("Please correct the duplicate data for %(field_name)s "
                        "which must be unique for the %(lookup)s "
                        "in %(date_field)s.") % {
            'field_name': date_check[2],
            'date_field': date_check[3],
            'lookup': str(date_check[1]),
        }

    def get_form_error(self):
        return ugettext("Please correct the duplicate values below.")
def documentformset_factory(document, form=DocumentForm,
                            formfield_callback=None,
                            formset=BaseDocumentFormSet,
                            extra=1, can_delete=False, can_order=False,
                            max_num=None, fields=None, exclude=None):
    """
    Returns a FormSet class for the given Django model class.

    The per-row form is generated with ``documentform_factory`` and then
    wrapped by django's ``formset_factory``; the resulting class carries the
    document on both ``model`` and ``document`` attributes.
    """
    row_form = documentform_factory(document, form=form, fields=fields,
                                    exclude=exclude,
                                    formfield_callback=formfield_callback)
    formset_class = formset_factory(row_form, formset, extra=extra,
                                    max_num=max_num, can_order=can_order,
                                    can_delete=can_delete)
    formset_class.model = document
    formset_class.document = document
    return formset_class
class BaseInlineDocumentFormSet(BaseDocumentFormSet):
    """
    A formset for child objects related to a parent.

    self.instance -> the document containing the inline objects
    """

    def __init__(self, data=None, files=None, instance=None,
                 save_as_new=False, prefix=None, queryset=None, **kwargs):
        # BUGFIX: the default used to be a shared mutable ``queryset=[]``.
        if queryset is None:
            queryset = []
        self.instance = instance
        self.save_as_new = save_as_new
        super(BaseInlineDocumentFormSet, self).__init__(data, files,
                                                        prefix=prefix,
                                                        queryset=queryset,
                                                        **kwargs)

    def initial_form_count(self):
        # "Save as new" starts from a blank slate: no initial forms.
        if self.save_as_new:
            return 0
        return super(BaseInlineDocumentFormSet, self).initial_form_count()

    @classmethod
    def get_default_prefix(cls):
        # Modernized from the legacy ``get_default_prefix = classmethod(...)``
        # assignment; behavior is unchanged.
        return cls.document.__name__.lower()

    def add_fields(self, form, index):
        super(BaseInlineDocumentFormSet, self).add_fields(form, index)

        # Add the generated field to form._meta.fields if it's defined to make
        # sure validation isn't skipped on that field.
        if form._meta.fields:
            if isinstance(form._meta.fields, tuple):
                form._meta.fields = list(form._meta.fields)
            # form._meta.fields.append(self.fk.name)

    def get_unique_error_message(self, unique_check):
        # NOTE(review): ``self.fk`` is never assigned in this class — this
        # method presumably only works on subclasses that set it; confirm.
        unique_check = [
            field for field in unique_check if field != self.fk.name
        ]
        return super(BaseInlineDocumentFormSet, self).get_unique_error_message(
            unique_check
        )
def inlineformset_factory(document, form=DocumentForm,
                          formset=BaseInlineDocumentFormSet,
                          fields=None, exclude=None,
                          extra=1, can_order=False, can_delete=True,
                          max_num=None, formfield_callback=None):
    """
    Returns an ``InlineFormSet`` for the given kwargs.

    You must provide ``fk_name`` if ``model`` has more than one ``ForeignKey``
    to ``parent_model``.
    """
    # Thin wrapper: forward everything to documentformset_factory with the
    # inline-friendly defaults of this function's signature.
    return documentformset_factory(
        document,
        form=form,
        formfield_callback=formfield_callback,
        formset=formset,
        extra=extra,
        can_delete=can_delete,
        can_order=can_order,
        fields=fields,
        exclude=exclude,
        max_num=max_num,
    )
class EmbeddedDocumentFormSet(BaseDocumentFormSet):
    """A formset editing the embedded documents stored on a parent document.

    The effective queryset is always read from the parent document's embedded
    field (``form._meta.embedded_field``); the ``queryset`` argument is kept
    for interface compatibility but is overwritten, as before.
    """

    def __init__(self, data=None, files=None, save_as_new=False,
                 prefix=None, queryset=None, parent_document=None, **kwargs):
        # BUGFIX: the default used to be a shared mutable ``queryset=[]``;
        # the value is overwritten below anyway, so ``None`` is safe.
        if parent_document is not None:
            self.parent_document = parent_document
        if 'instance' in kwargs:
            instance = kwargs.pop('instance')
            if parent_document is None:
                self.parent_document = instance
        # The authoritative queryset is the embedded field on the parent.
        queryset = getattr(self.parent_document,
                           self.form._meta.embedded_field)
        if queryset is None:
            # (original test was ``not isinstance(queryset, list) and
            # queryset is None``, which reduces to this)
            queryset = []
        elif not isinstance(queryset, list):
            queryset = [queryset, ]
        self.save_as_new = save_as_new
        # BUGFIX: ``save_as_new`` was previously passed positionally into
        # BaseDocumentFormSet.__init__'s ``auto_id`` slot (BaseDocumentFormSet
        # takes no ``save_as_new``), silently setting auto_id=False and
        # disabling HTML ids on every form. Pass by keyword instead.
        super(EmbeddedDocumentFormSet, self).__init__(data, files,
                                                      prefix=prefix,
                                                      queryset=queryset,
                                                      **kwargs)

    def _construct_form(self, i, **kwargs):
        defaults = {'parent_document': self.parent_document}

        # add position argument to the form. Otherwise we will spend
        # a huge amount of time iterating over the list field on form __init__
        emb_list = getattr(self.parent_document,
                           self.form._meta.embedded_field)
        if emb_list is not None and len(emb_list) > i:
            defaults['position'] = i
        defaults.update(kwargs)

        form = super(EmbeddedDocumentFormSet, self)._construct_form(
            i, **defaults)
        return form

    @classmethod
    def get_default_prefix(cls):
        return cls.document.__name__.lower()

    @property
    def empty_form(self):
        """Template form (``__prefix__``) bound to the parent document."""
        form = self.form(
            self.parent_document,
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
        )
        self.add_fields(form, None)
        return form

    def save(self, commit=True):
        # Don't try to save the new documents. Embedded objects don't have
        # a save method anyway.
        objs = super(EmbeddedDocumentFormSet, self).save(commit=False)
        objs = objs or []

        if commit and self.parent_document is not None:
            field = self.parent_document._fields.get(
                self.form._meta.embedded_field, None)
            if isinstance(field, EmbeddedDocumentField):
                # A plain embedded field holds a single document (or None).
                try:
                    obj = objs[0]
                except IndexError:
                    obj = None
                setattr(
                    self.parent_document, self.form._meta.embedded_field, obj)
            else:
                setattr(
                    self.parent_document, self.form._meta.embedded_field, objs)
            self.parent_document.save()

        return objs
def _get_embedded_field(parent_doc, document, emb_name=None, can_fail=False):
    """Return the field on ``parent_doc`` that embeds ``document``.

    When ``emb_name`` is given, that field is looked up by name and checked
    to actually embed ``document``.  Otherwise the single
    ``EmbeddedDocumentField`` (or ``ListField`` of embedded documents)
    pointing at ``document`` is located; zero or multiple candidates raise,
    unless ``can_fail`` permits returning None for the zero-candidate case.

    Restructured with early returns so ``field`` can no longer be referenced
    before assignment (the old FIXME).
    """
    if emb_name:
        emb_fields = [
            f for f in parent_doc._fields.values() if f.name == emb_name]
        if len(emb_fields) == 0:
            raise Exception("%s has no field named '%s'" %
                            (parent_doc, emb_name))
        # ``_fields`` is keyed by name, so at most one field can match.
        field = emb_fields[0]
        if not isinstance(field, (EmbeddedDocumentField, ListField)) or \
                (isinstance(field, EmbeddedDocumentField) and
                 field.document_type != document) or \
                (isinstance(field, ListField) and
                 isinstance(field.field, EmbeddedDocumentField) and
                 field.field.document_type != document):
            raise Exception(
                "emb_name '%s' is not a EmbeddedDocumentField or not a ListField to %s" % (
                    emb_name, document
                )
            )
        return field

    emb_fields = [
        f for f in parent_doc._fields.values()
        if (isinstance(f, EmbeddedDocumentField) and
            f.document_type == document) or
           (isinstance(f, ListField) and
            isinstance(f.field, EmbeddedDocumentField) and
            f.field.document_type == document)
    ]
    if len(emb_fields) == 1:
        return emb_fields[0]
    if len(emb_fields) == 0:
        if can_fail:
            return None
        raise Exception(
            "%s has no EmbeddedDocumentField or ListField to %s" % (parent_doc, document))
    raise Exception(
        "%s has more than 1 EmbeddedDocumentField to %s" % (parent_doc, document))
def embeddedformset_factory(document, parent_document,
                            form=EmbeddedDocumentForm,
                            formset=EmbeddedDocumentFormSet,
                            embedded_name=None,
                            fields=None, exclude=None,
                            extra=3, can_order=False, can_delete=True,
                            max_num=None, formfield_callback=None):
    """
    Build a formset class for documents embedded in ``parent_document``.

    The embedded field is located via ``embedded_name`` (or by searching
    the parent's fields). For a single-valued EmbeddedDocumentField the
    formset is capped at one form.
    """
    emb_field = _get_embedded_field(parent_document, document, emb_name=embedded_name)
    if isinstance(emb_field, EmbeddedDocumentField):
        # Only one embedded document can be stored in a non-list field.
        max_num = 1
    FormSet = documentformset_factory(
        document,
        form=form,
        formfield_callback=formfield_callback,
        formset=formset,
        extra=extra,
        can_delete=can_delete,
        can_order=can_order,
        fields=fields,
        exclude=exclude,
        max_num=max_num,
    )
    # Remember which parent field the generated forms write back to.
    FormSet.form._meta.embedded_field = emb_field.name
    return FormSet
| |
################################################################################
#This software was developed by the University of Tennessee as part of the
#Distributed Data Analysis of Neutron Scattering Experiments (DANSE)
#project funded by the US National Science Foundation.
#
#See the license text in license.txt
#
#copyright 2009, University of Tennessee
################################################################################
"""
Dialog panel to explore the P(r) inversion results for a range
of D_max value. User picks a number of points and a range of
distances, then can toggle between inversion outputs and see
their distribution as a function of D_max.
"""
import wx
import numpy as np
import logging
import sys
logger = logging.getLogger(__name__)
# Avoid Matplotlib complaining about the lack of legend on the plot
import warnings
warnings.simplefilter("ignore")
# Import plotting classes
from sas.sasgui.plottools.PlotPanel import PlotPanel
from sas.sasgui.plottools import Data1D as Model1D
from sas.sasgui.guiframe.gui_style import GUIFRAME_ID
from sas.sasgui.plottools.plottables import Graph
from pr_widgets import PrTextCtrl
# Default number of points on the output plot
DEFAULT_NPTS = 10
# Default output parameter to plot
DEFAULT_OUTPUT = 'Chi2/dof'
class OutputPlot(PlotPanel):
    """
    Panel that plots one selected inversion output as a function of D_max.
    """
    ## Title for plottools
    window_caption = "D Explorer"

    def __init__(self, d_min, d_max, parent, id= -1, color=None, \
                 dpi=None, style=wx.NO_FULL_REPAINT_ON_RESIZE, **kwargs):
        """
        Initialization. The parameters added to PlotPanel are:

        :param d_min: Minimum value of D_max to explore
        :param d_max: Maximum value of D_max to explore
        """
        PlotPanel.__init__(self, parent, id=id, style=style, **kwargs)
        self.parent = parent
        self.min = d_min
        self.max = d_max
        self.npts = DEFAULT_NPTS

        # Evenly spaced D_max values; the 0.01*step padding ensures the
        # upper bound survives floating-point round-off in arange.
        step = (self.max - self.min) / (self.npts - 1)
        self.x = np.arange(self.min, self.max + step * 0.01, step)
        n_points = len(self.x)
        y = np.ones(n_points)
        dy = np.zeros(n_points)

        # Curve shown in the plot area
        self.plot = Model1D(self.x, y=y, dy=dy)
        self.plot.name = DEFAULT_OUTPUT
        self.plot.symbol = GUIFRAME_ID.CURVE_SYMBOL_NUM

        # Graph container rendering the curve
        self.graph = Graph()
        self.graph.xaxis("\\rm{D_{max}}", 'A')
        self.graph.yaxis("\\rm{%s}" % DEFAULT_OUTPUT, "")
        self.graph.add(self.plot)
        self.graph.render(self)

        # Remove toolbar buttons that make no sense for this dialog
        self.toolbar.DeleteToolByPos(0)
        self.toolbar.DeleteToolByPos(8)
        self.toolbar.Realize()

    def onContextMenu(self, event):
        """
        Context menu with save-image and reset-graph actions.

        :TODO: Would be nice to add printing and log/linear scales.
            The current version of plottools no longer plays well with
            plots outside of guiframe. Guiframe team needs to fix this.
        """
        save_id = wx.NewId()
        menu = wx.Menu()
        menu.Append(save_id, '&Save image', 'Save image as PNG')
        wx.EVT_MENU(self, save_id, self.onSaveImage)

        reset_id = wx.NewId()
        menu.AppendSeparator()
        menu.Append(reset_id, '&Reset Graph')
        wx.EVT_MENU(self, reset_id, self.onResetGraph)

        position = self.ScreenToClient(event.GetPosition())
        self.PopupMenu(menu, position)
class Results(object):
    """
    Container for the inversion output parameters collected over a
    range of D_max values.
    """
    def __init__(self):
        """
        Create the empty output arrays and the dictionary mapping each
        display name to ``[axis label, unit, backing array]``.
        """
        # One array per output quantity, filled as inversions complete
        self.chi2 = []
        self.osc = []
        self.pos = []
        self.pos_err = []
        self.rg = []
        self.iq0 = []
        self.bck = []
        self.d_max = []

        # Display name -> [axis label, unit, backing array]
        self.outputs = {
            'Chi2/dof': ["\chi^2/dof", "a.u.", self.chi2],
            'Oscillation parameter': ["Osc", "a.u.", self.osc],
            'Positive fraction': ["P^+", "a.u.", self.pos],
            '1-sigma positive fraction': ["P^+_{1\ \sigma}", "a.u.", self.pos_err],
            'Rg': ["R_g", "A", self.rg],
            'I(q=0)': ["I(q=0)", "1/A", self.iq0],
            'Background': ["Bck", "1/A", self.bck],
        }
class ExploreDialog(wx.Dialog):
    """
    The explorer dialog box. This dialog is meant to be
    invoked by the InversionControl class.
    """
    def __init__(self, pr_state, nfunc, *args, **kwds):
        """
        Initialization. The parameters added to Dialog are:

        :param pr_state: sas.sascalc.pr.invertor.Invertor object
        :param nfunc: Number of terms in the expansion
        """
        kwds["style"] = wx.RESIZE_BORDER | wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)

        # Initialize Results object
        self.results = Results()

        self.pr_state = pr_state
        # Default exploration range: 10% below/above the current D_max
        self._default_min = 0.9 * self.pr_state.d_max
        self._default_max = 1.1 * self.pr_state.d_max
        self.nfunc = nfunc

        # Control for number of points
        self.npts_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER,
                                   size=(60, 20))
        # Control for the minimum value of D_max
        self.dmin_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER,
                                   size=(60, 20))
        # Control for the maximum value of D_max
        self.dmax_ctl = PrTextCtrl(self, -1, style=wx.TE_PROCESS_ENTER,
                                   size=(60, 20))

        # Output selection box for the y axis
        self.output_box = None

        # Create the plot object
        self.plotpanel = OutputPlot(self._default_min, self._default_max,
                                    self, -1, style=wx.RAISED_BORDER)

        # Create the layout of the dialog
        self.__do_layout()
        self.Fit()

        # Calculate exploration results
        self._recalc()
        # Graph the default output curve
        self._plot_output()

    class Event(object):
        """
        Class that holds the content of the form
        """
        ## Number of points to be plotted
        npts = 0
        ## Minimum value of D_max
        dmin = 0
        ## Maximum value of D_max
        dmax = 0

    def _get_values(self, event=None):
        """
        Invoked when the user changes a value of the form.
        Check that the values are of the right type.

        :return: ExploreDialog.Event object if the content is good,
            None otherwise
        """
        # Flag to make sure that all values are good
        flag = True

        # Empty ExploreDialog.Event content
        content_event = self.Event()

        # Read each text control and make sure the type is valid.
        # Let the user know if a type is invalid by changing the
        # background color of the control.
        try:
            content_event.npts = int(self.npts_ctl.GetValue())
            self.npts_ctl.SetBackgroundColour(wx.WHITE)
            self.npts_ctl.Refresh()
        except (ValueError, TypeError):
            flag = False
            self.npts_ctl.SetBackgroundColour("pink")
            self.npts_ctl.Refresh()
        try:
            content_event.dmin = float(self.dmin_ctl.GetValue())
            self.dmin_ctl.SetBackgroundColour(wx.WHITE)
            self.dmin_ctl.Refresh()
        except (ValueError, TypeError):
            flag = False
            self.dmin_ctl.SetBackgroundColour("pink")
            self.dmin_ctl.Refresh()
        try:
            content_event.dmax = float(self.dmax_ctl.GetValue())
            self.dmax_ctl.SetBackgroundColour(wx.WHITE)
            self.dmax_ctl.Refresh()
        except (ValueError, TypeError):
            flag = False
            self.dmax_ctl.SetBackgroundColour("pink")
            self.dmax_ctl.Refresh()

        # If the content of the form is valid, return the content,
        # otherwise return None
        if flag:
            if event is not None:
                event.Skip(True)
            return content_event
        else:
            return None

    def _plot_output(self, event=None):
        """
        Invoked when a new output type is selected for plotting,
        or when a new computation is finished.
        """
        # Get the output type selection
        output_type = self.output_box.GetString(self.output_box.GetSelection())

        # If the selected output type is part of the results object,
        # display the results.
        # Note: by design, the output type should always be part of the
        # results object.
        if output_type in self.results.outputs:
            self.plotpanel.plot.x = self.results.d_max
            self.plotpanel.plot.y = self.results.outputs[output_type][2]
            self.plotpanel.plot.name = '_nolegend_'
            y_label = "\\rm{%s}" % self.results.outputs[output_type][0]
            self.plotpanel.graph.yaxis(y_label,
                                       self.results.outputs[output_type][1])

            # Redraw
            self.plotpanel.graph.render(self.plotpanel)
            self.plotpanel.subplot.figure.canvas.draw_idle()
        else:
            # The original code applied the % operator to a segment that
            # contained no format spec, raising a TypeError on this path.
            msg = ("ExploreDialog: the Results object's dictionary does not "
                   "contain the [%s] output type. This must be indicative of "
                   "a change in the ExploreDialog code." % str(output_type))
            logger.error(msg)

    def __do_layout(self):
        """
        Do the layout of the dialog
        """
        # Dialog box properties
        self.SetTitle("D_max Explorer")
        self.SetSize((600, 595))

        sizer_main = wx.BoxSizer(wx.VERTICAL)
        sizer_button = wx.BoxSizer(wx.HORIZONTAL)
        sizer_params = wx.GridBagSizer(5, 5)

        iy = 0
        ix = 0
        label_npts = wx.StaticText(self, -1, "Npts")
        sizer_params.Add(label_npts, (iy, ix), (1, 1),
                         wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
        ix += 1
        sizer_params.Add(self.npts_ctl, (iy, ix), (1, 1),
                         wx.EXPAND | wx.ADJUST_MINSIZE, 0)
        self.npts_ctl.SetValue("%g" % DEFAULT_NPTS)

        ix += 1
        label_dmin = wx.StaticText(self, -1, "Min Distance [A]")
        sizer_params.Add(label_dmin, (iy, ix), (1, 1),
                         wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
        ix += 1
        sizer_params.Add(self.dmin_ctl, (iy, ix), (1, 1),
                         wx.EXPAND | wx.ADJUST_MINSIZE, 0)
        self.dmin_ctl.SetValue(str(self._default_min))

        ix += 1
        label_dmax = wx.StaticText(self, -1, "Max Distance [A]")
        sizer_params.Add(label_dmax, (iy, ix), (1, 1),
                         wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 15)
        ix += 1
        sizer_params.Add(self.dmax_ctl, (iy, ix), (1, 1),
                         wx.EXPAND | wx.ADJUST_MINSIZE, 0)
        self.dmax_ctl.SetValue(str(self._default_max))

        # Output selection box
        selection_msg = wx.StaticText(self, -1, "Select a dependent variable:")
        self.output_box = wx.ComboBox(self, -1, style=wx.CB_READONLY)
        for item in self.results.outputs.keys():
            self.output_box.Append(item, "")
        self.output_box.SetStringSelection(DEFAULT_OUTPUT)

        output_sizer = wx.GridBagSizer(5, 5)
        output_sizer.Add(selection_msg, (0, 0), (1, 1),
                         wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 10)
        output_sizer.Add(self.output_box, (0, 1), (1, 2),
                         wx.LEFT | wx.EXPAND | wx.ADJUST_MINSIZE, 10)

        wx.EVT_COMBOBOX(self.output_box, -1, self._plot_output)
        sizer_main.Add(output_sizer, 0, wx.EXPAND | wx.ALL, 10)
        sizer_main.Add(self.plotpanel, 0, wx.EXPAND | wx.ALL, 10)
        sizer_main.SetItemMinSize(self.plotpanel, 400, 400)
        sizer_main.Add(sizer_params, 0, wx.EXPAND | wx.ALL, 10)

        static_line_3 = wx.StaticLine(self, -1)
        sizer_main.Add(static_line_3, 0, wx.EXPAND, 0)

        # Bottom area with the close button
        sizer_button.Add((20, 20), 1, wx.EXPAND | wx.ADJUST_MINSIZE, 0)
        button_OK = wx.Button(self, wx.ID_OK, "Close")
        sizer_button.Add(button_OK, 0, wx.LEFT | wx.RIGHT | wx.ADJUST_MINSIZE, 10)
        sizer_main.Add(sizer_button, 0, wx.EXPAND | wx.BOTTOM | wx.TOP, 10)

        self.SetAutoLayout(True)
        self.SetSizer(sizer_main)
        self.Layout()
        self.Centre()

        # Bind the Enter key to recalculation
        self.Bind(wx.EVT_TEXT_ENTER, self._recalc)

    def set_plot_unfocus(self):
        """
        Not implemented
        """
        pass

    def send_focus_to_datapanel(self, name):
        """
        The GUI manager sometimes calls this method
        TODO: refactor this
        """
        pass

    def _recalc(self, event=None):
        """
        Invoked when the user changed a value on the form.
        Process the form and compute the output to be plotted.
        """
        # Get the content of the form
        content = self._get_values()
        # If the content of the form is invalid, return and do nothing
        if content is None:
            return

        # Results object to store the computation outputs.
        results = Results()

        # Step between consecutive D_max values. Hoisted out of the loop
        # and guarded so a single-point request no longer raises
        # ZeroDivisionError.
        if content.npts > 1:
            step = (content.dmax - content.dmin) / (content.npts - 1.0)
        else:
            step = 0.0

        # Loop over d_max values
        for i in range(content.npts):
            d = content.dmin + i * step
            self.pr_state.d_max = d
            try:
                out, cov = self.pr_state.invert(self.nfunc)

                # Store results
                iq0 = self.pr_state.iq0(out)
                rg = self.pr_state.rg(out)
                pos = self.pr_state.get_positive(out)
                pos_err = self.pr_state.get_pos_err(out, cov)
                osc = self.pr_state.oscillations(out)
                results.d_max.append(self.pr_state.d_max)
                results.bck.append(self.pr_state.background)
                results.chi2.append(self.pr_state.chi2)
                results.iq0.append(iq0)
                results.rg.append(rg)
                results.pos.append(pos)
                results.pos_err.append(pos_err)
                results.osc.append(osc)
            except Exception as exc:
                # This inversion failed, skip this D_max value.
                # (sys.exc_value was Python-2-only; the bound exception is
                # used instead.)
                msg = "ExploreDialog: inversion failed "
                msg += "for D_max=%s\n%s" % (str(d), exc)
                logger.error(msg)

        self.results = results

        # Plot the selected output
        self._plot_output()
| |
import datetime
import logging
import warnings
from werkzeug.exceptions import BadRequest
from rdr_service.dao.base_dao import BaseDao
from rdr_service.dao.hpo_dao import HPODao
from rdr_service.dao.metrics_cache_dao import (
MetricsAgeCacheDao,
MetricsCacheJobStatusDao,
MetricsEnrollmentStatusCacheDao,
MetricsGenderCacheDao,
MetricsLanguageCacheDao,
MetricsLifecycleCacheDao,
MetricsRaceCacheDao,
MetricsRegionCacheDao,
MetricsParticipantOriginCacheDao
)
from rdr_service.model.metrics_cache import MetricsCacheJobStatus
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.participant_enums import (
EnrollmentStatus,
EnrollmentStatusV2,
MetricsAPIVersion,
MetricsCacheType,
Stratifications,
TEST_EMAIL_PATTERN,
TEST_HPO_NAME,
WithdrawalStatus,
MetricsCronJobStage
)
from rdr_service.dao.metrics_cache_dao import TEMP_TABLE_PREFIX
class ParticipantCountsOverTimeService(BaseDao):
def __init__(self):
super(ParticipantCountsOverTimeService, self).__init__(ParticipantSummary, alembic=True)
self.test_hpo_id = HPODao().get_by_name(TEST_HPO_NAME).hpoId
self.test_email_pattern = TEST_EMAIL_PATTERN
self.start_date = datetime.datetime.strptime("2017-05-30", "%Y-%m-%d").date()
self.end_date = datetime.datetime.now().date() + datetime.timedelta(days=10)
self.stage_number = MetricsCronJobStage.STAGE_ONE
self.cronjob_time = datetime.datetime.now().replace(microsecond=0)
def init_tmp_table(self):
with self.session() as session:
hpo_dao = HPODao()
hpo_list = hpo_dao.get_all()
for hpo in hpo_list:
if hpo.hpoId == self.test_hpo_id:
continue
temp_table_name = TEMP_TABLE_PREFIX + str(hpo.hpoId)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
session.execute('DROP TABLE IF EXISTS {};'.format(temp_table_name))
# generated columns can not be inserted any value, need to drop them
exclude_columns = ['retention_eligible_time', 'retention_eligible_status', 'was_ehr_data_available']
session.execute('CREATE TABLE {} LIKE participant_summary'.format(temp_table_name))
indexes_cursor = session.execute('SHOW INDEX FROM {}'.format(temp_table_name))
for exclude_column_name in exclude_columns:
session.execute('ALTER TABLE {} DROP COLUMN {}'.format(temp_table_name, exclude_column_name))
index_name_list = []
for index in indexes_cursor:
index_name_list.append(index[2])
index_name_list = list(set(index_name_list))
for index_name in index_name_list:
if index_name != 'PRIMARY':
session.execute('ALTER TABLE {} DROP INDEX {}'.format(temp_table_name, index_name))
# The ParticipantSummary table requires these, but there may not be a participant_summary for
# all participants that we insert
session.execute('ALTER TABLE {} MODIFY first_name VARCHAR(255)'.format(temp_table_name))
session.execute('ALTER TABLE {} MODIFY last_name VARCHAR(255)'.format(temp_table_name))
session.execute('ALTER TABLE {} MODIFY suspension_status SMALLINT'.format(temp_table_name))
session.execute('ALTER TABLE {} MODIFY participant_origin VARCHAR(80)'.format(temp_table_name))
session.execute('ALTER TABLE {} MODIFY deceased_status SMALLINT'.format(temp_table_name))
session.execute('ALTER TABLE {} MODIFY is_ehr_data_available TINYINT(1)'.format(temp_table_name))
columns_cursor = session.execute('SELECT * FROM {} LIMIT 0'.format(temp_table_name))
participant_fields = ['participant_id', 'biobank_id', 'sign_up_time', 'withdrawal_status',
'hpo_id', 'organization_id', 'site_id', 'participant_origin']
def get_field_name(name):
if name in participant_fields:
return 'p.' + name
else:
return 'ps.' + name
columns = map(get_field_name, columns_cursor.keys())
columns_str = ','.join(columns)
participant_sql = """
INSERT INTO
""" + temp_table_name + """
SELECT
""" + columns_str + """
FROM participant p
left join participant_summary ps on p.participant_id = ps.participant_id
WHERE p.hpo_id <> :test_hpo_id
AND p.is_ghost_id IS NOT TRUE
AND p.is_test_participant IS NOT TRUE
AND (ps.email IS NULL OR NOT ps.email LIKE :test_email_pattern)
AND p.withdrawal_status = :not_withdraw
AND p.hpo_id = :hpo_id
"""
params = {'test_hpo_id': self.test_hpo_id, 'test_email_pattern': self.test_email_pattern,
'not_withdraw': int(WithdrawalStatus.NOT_WITHDRAWN), 'hpo_id': hpo.hpoId}
session.execute('CREATE INDEX idx_sign_up_time ON {} (sign_up_time)'.format(temp_table_name))
session.execute('CREATE INDEX idx_date_of_birth ON {} (date_of_birth)'.format(temp_table_name))
session.execute('CREATE INDEX idx_consent_time ON {} (consent_for_study_enrollment_time)'
.format(temp_table_name))
session.execute('CREATE INDEX idx_member_time ON {} (enrollment_status_member_time)'
.format(temp_table_name))
session.execute('CREATE INDEX idx_sample_time ON {} (enrollment_status_core_stored_sample_time)'
.format(temp_table_name))
session.execute('CREATE INDEX idx_participant_origin ON {} (participant_origin)'
.format(temp_table_name))
session.execute(participant_sql, params)
logging.info('crete temp table for hpo_id: ' + str(hpo.hpoId))
session.execute('DROP TABLE IF EXISTS metrics_tmp_participant_origin;')
session.execute('CREATE TABLE metrics_tmp_participant_origin (participant_origin VARCHAR(50))')
participant_origin_sql = """
INSERT INTO metrics_tmp_participant_origin
SELECT DISTINCT participant_origin FROM participant
"""
session.execute(participant_origin_sql)
logging.info('Init temp table for metrics cron job.')
def clean_tmp_tables(self):
with self.session() as session:
hpo_dao = HPODao()
hpo_list = hpo_dao.get_all()
for hpo in hpo_list:
if hpo.hpoId == self.test_hpo_id:
continue
temp_table_name = TEMP_TABLE_PREFIX + str(hpo.hpoId)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
session.execute('DROP TABLE IF EXISTS {};'.format(temp_table_name))
def refresh_metrics_cache_data(self, start_date, end_date, stage_number):
self.start_date = start_date
self.end_date = end_date
self.stage_number = stage_number
# For public metrics job, calculate new result for stage one, and copy history result for stage two
if stage_number == MetricsCronJobStage.STAGE_ONE:
self.refresh_data_for_metrics_cache(MetricsLifecycleCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
logging.info("Refresh MetricsLifecycleCache for Public Metrics API done.")
self.refresh_data_for_metrics_cache(MetricsGenderCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
logging.info("Refresh MetricsGenderCache for Public Metrics API done.")
self.refresh_data_for_metrics_cache(MetricsAgeCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
logging.info("Refresh MetricsAgeCache for Public Metrics API done.")
self.refresh_data_for_metrics_cache(MetricsRaceCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
logging.info("Refresh MetricsRaceCache for Public Metrics API done.")
elif stage_number == MetricsCronJobStage.STAGE_TWO:
self.refresh_data_for_public_metrics_cache_stage_two(
MetricsLifecycleCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
self.refresh_data_for_public_metrics_cache_stage_two(
MetricsGenderCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
self.refresh_data_for_public_metrics_cache_stage_two(
MetricsAgeCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
self.refresh_data_for_public_metrics_cache_stage_two(
MetricsRaceCacheDao(MetricsCacheType.PUBLIC_METRICS_EXPORT_API))
self.refresh_data_for_metrics_cache(MetricsEnrollmentStatusCacheDao())
logging.info("Refresh MetricsEnrollmentStatusCache done.")
self.refresh_data_for_metrics_cache(MetricsRegionCacheDao())
logging.info("Refresh MetricsRegionCache done.")
self.refresh_data_for_metrics_cache(MetricsLanguageCacheDao())
logging.info("Refresh MetricsLanguageCache done.")
self.refresh_data_for_metrics_cache(MetricsGenderCacheDao(MetricsCacheType.METRICS_V2_API))
logging.info("Refresh MetricsGenderCache for Metrics2API done.")
self.refresh_data_for_metrics_cache(MetricsRaceCacheDao(MetricsCacheType.METRICS_V2_API))
logging.info("Refresh MetricsRaceCache for Metrics2API done.")
def refresh_data_for_metrics_cache(self, dao):
status_dao = MetricsCacheJobStatusDao()
if self.stage_number == MetricsCronJobStage.STAGE_ONE:
kwargs = dict(
cacheTableName=dao.table_name,
type=str(dao.cache_type),
inProgress=True,
stage_one_complete=False,
stage_two_complete=False,
dateInserted=self.cronjob_time,
)
job_status_obj = MetricsCacheJobStatus(**kwargs)
status_dao.insert(job_status_obj)
hpo_dao = HPODao()
hpo_list = hpo_dao.get_all()
for hpo in hpo_list:
if hpo.hpoId == self.test_hpo_id:
continue
self.insert_cache_by_hpo(dao, hpo.hpoId)
status_dao.set_to_complete(dao.cache_type, dao.table_name, self.cronjob_time, self.stage_number)
if self.stage_number == MetricsCronJobStage.STAGE_TWO:
dao.delete_old_records()
def refresh_data_for_public_metrics_cache_stage_two(self, dao):
if self.stage_number != MetricsCronJobStage.STAGE_TWO:
return
status_dao = MetricsCacheJobStatusDao()
last_success_stage_two = status_dao.get_last_complete_stage_two_data_inserted_time(dao.table_name,
dao.cache_type)
if not last_success_stage_two:
logging.info(f'No last success stage two found for {dao.table_name}, calculate new data for stage two')
self.refresh_data_for_metrics_cache(dao)
else:
dao.update_historical_cache_data(self.cronjob_time, last_success_stage_two.dateInserted,
self.start_date, self.end_date)
status_dao.set_to_complete(dao.cache_type, dao.table_name, self.cronjob_time, self.stage_number)
dao.delete_old_records(n_days_ago=30)
def insert_cache_by_hpo(self, dao, hpo_id):
sql_arr = dao.get_metrics_cache_sql(hpo_id)
params = {'hpo_id': hpo_id, 'start_date': self.start_date, 'end_date': self.end_date,
'date_inserted': self.cronjob_time}
with dao.session() as session:
for sql in sql_arr:
session.execute(sql, params)
    def get_filtered_results(
        self, stratification, start_date, end_date, history, awardee_ids, enrollment_statuses, sample_time_def,
        participant_origins, version
    ):
        """Queries DB, returns results in format consumed by front-end

        :param start_date: Start date object
        :param end_date: End date object
        :param awardee_ids: indicate awardee ids
        :param enrollment_statuses: indicate the enrollment status
        :param sample_time_def: indicate how to filter the core participant
        :param history: query for history data from metrics cache table
        :param stratification: How to stratify (layer) results, as in a stacked bar chart
        :param version: indicate the version of the result filter
        :param participant_origins: indicate the participant origins
        :return: Filtered, stratified results by date
        """

        # Filters for participant_summary (ps) and participant (p) table
        # filters_sql_ps is used in the general case when we're querying participant_summary
        # filters_sql_p is used when also LEFT OUTER JOINing p and ps
        # Enrollment status values are wrapped in the enum matching the API
        # version so the SQL builder can compare them numerically.
        facets = {
            "enrollment_statuses": [
                EnrollmentStatusV2(val) if version == MetricsAPIVersion.V2 else EnrollmentStatus(val)
                for val in enrollment_statuses
            ],
            "awardee_ids": awardee_ids,
        }
        filters_sql_ps = self.get_facets_sql(facets, stratification)
        filters_sql_p = self.get_facets_sql(facets, stratification, participant_origins, table_prefix="p")

        # NOTE(review): `history` appears to arrive as the string "TRUE"/"FALSE"
        # (it is compared via str(history) == "TRUE") — confirm against caller.
        # History queries are served from the pre-computed metrics cache daos;
        # the remaining branches build live SQL against participant tables.
        if str(history) == "TRUE" and stratification == Stratifications.TOTAL:
            dao = MetricsEnrollmentStatusCacheDao(version=version)
            return dao.get_total_interested_count(start_date, end_date, awardee_ids, enrollment_statuses,
                                                  participant_origins)
        elif str(history) == "TRUE" and stratification == Stratifications.ENROLLMENT_STATUS:
            dao = MetricsEnrollmentStatusCacheDao(version=version)
            return dao.get_latest_version_from_cache(start_date, end_date, awardee_ids, enrollment_statuses,
                                                     participant_origins)
        elif str(history) == "TRUE" and stratification == Stratifications.GENDER_IDENTITY:
            dao = MetricsGenderCacheDao(version=version)
            return dao.get_latest_version_from_cache(start_date, end_date, awardee_ids, enrollment_statuses,
                                                     participant_origins)
        elif str(history) == "TRUE" and stratification == Stratifications.AGE_RANGE:
            dao = MetricsAgeCacheDao()
            return dao.get_latest_version_from_cache(start_date, end_date, awardee_ids, enrollment_statuses,
                                                     participant_origins)
        elif str(history) == "TRUE" and stratification == Stratifications.RACE:
            dao = MetricsRaceCacheDao(version=version)
            return dao.get_latest_version_from_cache(start_date, end_date, awardee_ids, enrollment_statuses,
                                                     participant_origins)
        elif str(history) == "TRUE" and stratification in [
            Stratifications.FULL_STATE,
            Stratifications.FULL_CENSUS,
            Stratifications.FULL_AWARDEE,
            Stratifications.GEO_STATE,
            Stratifications.GEO_CENSUS,
            Stratifications.GEO_AWARDEE,
        ]:
            dao = MetricsRegionCacheDao(version=version)
            return dao.get_latest_version_from_cache(end_date, stratification, awardee_ids, enrollment_statuses,
                                                     participant_origins)
        elif str(history) == "TRUE" and stratification == Stratifications.LANGUAGE:
            dao = MetricsLanguageCacheDao()
            return dao.get_latest_version_from_cache(start_date, end_date, awardee_ids, enrollment_statuses)
        elif str(history) == "TRUE" and stratification == Stratifications.LIFECYCLE:
            dao = MetricsLifecycleCacheDao(version=version)
            return dao.get_latest_version_from_cache(end_date, awardee_ids, enrollment_statuses, participant_origins)
        elif stratification == Stratifications.PARTICIPANT_ORIGIN:
            dao = MetricsParticipantOriginCacheDao()
            return dao.get_participant_origins()
        # Live-SQL branches: pick the strata labels and the query to run.
        elif stratification == Stratifications.TOTAL:
            strata = ["TOTAL"]
            sql = self.get_total_sql(filters_sql_ps)
        elif version == MetricsAPIVersion.V2 and stratification == Stratifications.ENROLLMENT_STATUS:
            strata = [str(val) for val in EnrollmentStatusV2]
            sql = self.get_enrollment_status_sql(filters_sql_p, sample_time_def, version)
        elif stratification == Stratifications.ENROLLMENT_STATUS:
            strata = [str(val) for val in EnrollmentStatus]
            sql = self.get_enrollment_status_sql(filters_sql_p, sample_time_def)
        elif stratification == Stratifications.EHR_CONSENT:
            strata = ["EHR_CONSENT"]
            sql = self.get_total_sql(filters_sql_ps, ehr_count=True)
        elif stratification == Stratifications.EHR_RATIO:
            strata = ["EHR_RATIO"]
            sql = self.get_ratio_sql(filters_sql_ps)
        else:
            raise BadRequest("Invalid stratification: %s" % stratification)

        params = {"start_date": start_date, "end_date": end_date}

        results_by_date = []

        with self.session() as session:
            cursor = session.execute(sql, params)

        # Iterate through each result (by date), transforming tabular SQL results
        # into expected list-of-dictionaries response format
        try:
            results = cursor.fetchall()
            for result in results:
                # Each row is (value-per-stratum..., date): last column is the
                # date, the preceding columns line up with `strata`.
                date = result[-1]
                metrics = {}
                values = result[:-1]
                for i, value in enumerate(values):
                    key = strata[i]
                    if value is None or (
                        stratification == Stratifications.ENROLLMENT_STATUS
                        and enrollment_statuses
                        and key not in enrollment_statuses
                    ):
                        # Unrequested or missing strata are zeroed, not omitted.
                        value = 0
                    # EHR_RATIO is the only fractional metric; others are counts.
                    metrics[key] = float(value) if stratification == Stratifications.EHR_RATIO else int(value)
                results_by_date.append({"date": str(date), "metrics": metrics})
        finally:
            cursor.close()

        return results_by_date
def get_facets_sql(self, facets, stratification, participant_origins=None, table_prefix="ps"):
"""Helper function to transform facets/filters selection into SQL
:param facets: Object representing facets and filters to apply to query results
:param stratification: How to stratify (layer) results, as in a stacked bar chart
:param participant_origins: indicate array of participant_origins
:param table_prefix: Either 'ps' (for participant_summary) or 'p' (for participant)
:return: SQL for 'WHERE' clause, reflecting filters specified in UI
"""
facets_sql = "WHERE "
facets_sql_list = []
facet_map = {"awardee_ids": "hpo_id", "enrollment_statuses": "enrollment_status"}
# the SQL for ENROLLMENT_STATUS stratify is using the enrollment status time
# instead of enrollment status
if "enrollment_statuses" in facets and stratification == Stratifications.ENROLLMENT_STATUS:
del facets["enrollment_statuses"]
del facet_map["enrollment_statuses"]
for facet in facets:
filter_prefix = table_prefix
filters_sql = []
db_field = facet_map[facet]
filters = facets[facet]
allow_null = False
if db_field == "enrollment_status":
filter_prefix = "ps"
allow_null = True
# TODO:
# Consider using an IN clause with bound parameters, instead, which
# would be simpler than this,
#
# TODO:
# Consider using bound parameters here instead of inlining the values
# in the SQL. We do that in other places using this function:
#
# dao/database_utils.py#L16
#
# This may help the SQL perform slightly better since the execution
# plan for the query can be cached when the only thing changing are
# the bound params.
for q_filter in filters:
if str(q_filter) != "":
filter_sql = filter_prefix + "." + db_field + " = " + str(int(q_filter))
if allow_null and str(int(q_filter)) == "1":
filters_sql.append("(" + filter_sql + " or " + filter_prefix + "." + db_field + " IS NULL)")
else:
filters_sql.append(filter_sql)
if len(filters_sql) > 0:
filters_sql = "(" + " OR ".join(filters_sql) + ")"
facets_sql_list.append(filters_sql)
if len(facets_sql_list) > 0:
facets_sql += " AND ".join(facets_sql_list) + " AND"
# TODO: use bound parameters
# See https://github.com/all-of-us/raw-data-repository/pull/669/files/a08be0ffe445da60ebca13b41d694368e4d42617#diff-6c62346e0cbe4a7fd7a45af6d4559c3e # pylint: disable=line-too-long
facets_sql += " %(table_prefix)s.hpo_id != %(test_hpo_id)s " % {
"table_prefix": table_prefix,
"test_hpo_id": self.test_hpo_id,
}
facets_sql += ' AND (ps.email IS NULL OR NOT ps.email LIKE "%(test_email_pattern)s")' % {
"test_email_pattern": self.test_email_pattern
}
facets_sql += " AND %(table_prefix)s.withdrawal_status = %(not_withdrawn)i" % {
"table_prefix": table_prefix,
"not_withdrawn": WithdrawalStatus.NOT_WITHDRAWN,
}
facets_sql += " AND p.is_ghost_id IS NOT TRUE AND p.is_test_participant IS NOT TRUE"
if participant_origins:
facets_sql += " AND p.participant_origin in ({}) "\
.format(",".join(["'" + origin + "'" for origin in participant_origins]))
return facets_sql
@staticmethod
def get_total_sql(filters_sql, ehr_count=False):
if ehr_count:
# date consented
date_field = "ps.consent_for_electronic_health_records_time"
else:
# date joined
date_field = "p.sign_up_time"
return """
SELECT
SUM(ps_sum.cnt * (ps_sum.day <= calendar.day)) registered_count,
calendar.day start_date
FROM calendar,
(
SELECT
COUNT(*) cnt,
DATE(%(date_field)s) day
FROM participant p
LEFT OUTER JOIN participant_summary ps
ON p.participant_id = ps.participant_id
%(filters)s
GROUP BY day
) ps_sum
WHERE calendar.day >= :start_date
AND calendar.day <= :end_date
GROUP BY calendar.day
ORDER BY calendar.day;
""" % {
"filters": filters_sql,
"date_field": date_field,
}
@staticmethod
def get_ratio_sql(filters_sql):
return """
select
ifnull(
(
select count(*)
from participant p
LEFT OUTER JOIN participant_summary ps
ON p.participant_id = ps.participant_id
%(filters)s
and ps.consent_for_electronic_health_records_time <= calendar.day
) / (
select count(*)
from participant p
LEFT OUTER JOIN participant_summary ps
ON p.participant_id = ps.participant_id
%(filters)s
and p.sign_up_time <= calendar.day
),
0
) ratio,
calendar.day start_date
from calendar
where calendar.day >= :start_date
and calendar.day <= :end_date
order by calendar.day;
""" % {
"filters": filters_sql
}
    def get_enrollment_status_sql(self, filters_sql_p, filter_by="ORDERED", version=None):
        """Return SQL counting participants in each enrollment bucket per day.

        For every calendar day between :start_date and :end_date (bound at
        execution time) the query reports how many participants are in each
        enrollment stage as of that day.  `filters_sql_p` is spliced verbatim
        into every inner sub-query.

        :param filters_sql_p: WHERE-clause fragment applied to participant p
            left-joined with participant_summary ps.
        :param filter_by: "ORDERED" (default) or "STORED" -- selects whether
            "core" status keys off the ordered- or stored-sample timestamp.
        :param version: when MetricsAPIVersion.V2, buckets are registered /
            participant / fully_consented / core_participant; otherwise the
            legacy registered_participants / member_participants /
            full_participants buckets are produced.
        """
        # Which participant_summary column marks the "core participant" stage.
        core_sample_time_field_name = "enrollment_status_core_ordered_sample_time"
        if filter_by == "STORED":
            core_sample_time_field_name = "enrollment_status_core_stored_sample_time"
        if version == MetricsAPIVersion.V2:
            # V2 splits the legacy "registered" bucket in two: registered
            # (signed up, no study consent yet) and participant (consented
            # but not yet a member).
            sql = """
                SELECT
                IFNULL((
                  SELECT SUM(results.enrollment_count)
                  FROM
                  (
                    SELECT DATE(p.sign_up_time) AS sign_up_time,
                           DATE(ps.consent_for_study_enrollment_time) AS consent_for_study_enrollment_time,
                           count(*) enrollment_count
                    FROM participant p
                           LEFT JOIN participant_summary ps ON p.participant_id = ps.participant_id
                           %(filters_p)s
                    GROUP BY DATE(p.sign_up_time), DATE(ps.consent_for_study_enrollment_time)
                  ) AS results
                  WHERE c.day>=DATE(sign_up_time) AND consent_for_study_enrollment_time IS NULL
                ),0) AS registered,
                IFNULL((
                  SELECT SUM(results.enrollment_count)
                  FROM
                  (
                    SELECT DATE(ps.consent_for_study_enrollment_time) AS consent_for_study_enrollment_time,
                           DATE(ps.enrollment_status_member_time) AS enrollment_status_member_time,
                           count(*) enrollment_count
                    FROM participant p
                           LEFT JOIN participant_summary ps ON p.participant_id = ps.participant_id
                           %(filters_p)s
                    GROUP BY DATE(ps.consent_for_study_enrollment_time), DATE(ps.enrollment_status_member_time)
                  ) AS results
                  WHERE consent_for_study_enrollment_time IS NOT NULL AND c.day>=DATE(consent_for_study_enrollment_time) AND (enrollment_status_member_time IS NULL OR c.day < DATE(enrollment_status_member_time))
                ),0) AS participant,
                IFNULL((
                  SELECT SUM(results.enrollment_count)
                  FROM
                  (
                    SELECT DATE(ps.enrollment_status_member_time) AS enrollment_status_member_time,
                           DATE(ps.%(core_sample_time_field_name)s) AS %(core_sample_time_field_name)s,
                           count(*) enrollment_count
                    FROM participant p
                           LEFT JOIN participant_summary ps ON p.participant_id = ps.participant_id
                           %(filters_p)s
                    GROUP BY DATE(ps.enrollment_status_member_time), DATE(ps.%(core_sample_time_field_name)s)
                  ) AS results
                  WHERE enrollment_status_member_time IS NOT NULL AND day>=DATE(enrollment_status_member_time) AND (%(core_sample_time_field_name)s IS NULL OR day < DATE(%(core_sample_time_field_name)s))
                ),0) AS fully_consented,
                IFNULL((
                  SELECT SUM(results.enrollment_count)
                  FROM
                  (
                    SELECT DATE(ps.%(core_sample_time_field_name)s) AS %(core_sample_time_field_name)s,
                           count(*) enrollment_count
                    FROM participant p
                           LEFT JOIN participant_summary ps ON p.participant_id = ps.participant_id
                           %(filters_p)s
                    GROUP BY DATE(ps.%(core_sample_time_field_name)s)
                  ) AS results
                  WHERE %(core_sample_time_field_name)s IS NOT NULL AND day>=DATE(%(core_sample_time_field_name)s)
                ),0) AS core_participant,
                day
                FROM calendar c
                WHERE c.day BETWEEN :start_date AND :end_date
            """ % {
                "filters_p": filters_sql_p,
                "core_sample_time_field_name": core_sample_time_field_name,
            }
        else:
            # Legacy buckets: everyone pre-membership counts as "registered".
            sql = """
                SELECT
                IFNULL((
                  SELECT SUM(results.enrollment_count)
                  FROM
                  (
                    SELECT DATE(p.sign_up_time) AS sign_up_time,
                           DATE(ps.enrollment_status_member_time) AS enrollment_status_member_time,
                           count(*) enrollment_count
                    FROM participant p
                           LEFT JOIN participant_summary ps ON p.participant_id = ps.participant_id
                           %(filters_p)s
                    GROUP BY DATE(p.sign_up_time), DATE(ps.enrollment_status_member_time)
                  ) AS results
                  WHERE c.day>=DATE(sign_up_time) AND (enrollment_status_member_time IS NULL OR c.day < DATE(enrollment_status_member_time))
                ),0) AS registered_participants,
                IFNULL((
                  SELECT SUM(results.enrollment_count)
                  FROM
                  (
                    SELECT DATE(ps.enrollment_status_member_time) AS enrollment_status_member_time,
                           DATE(ps.%(core_sample_time_field_name)s) AS %(core_sample_time_field_name)s,
                           count(*) enrollment_count
                    FROM participant p
                           LEFT JOIN participant_summary ps ON p.participant_id = ps.participant_id
                           %(filters_p)s
                    GROUP BY DATE(ps.enrollment_status_member_time), DATE(ps.%(core_sample_time_field_name)s)
                  ) AS results
                  WHERE enrollment_status_member_time IS NOT NULL AND day>=DATE(enrollment_status_member_time) AND (%(core_sample_time_field_name)s IS NULL OR day < DATE(%(core_sample_time_field_name)s))
                ),0) AS member_participants,
                IFNULL((
                  SELECT SUM(results.enrollment_count)
                  FROM
                  (
                    SELECT DATE(ps.%(core_sample_time_field_name)s) AS %(core_sample_time_field_name)s,
                           count(*) enrollment_count
                    FROM participant p
                           LEFT JOIN participant_summary ps ON p.participant_id = ps.participant_id
                           %(filters_p)s
                    GROUP BY DATE(ps.%(core_sample_time_field_name)s)
                  ) AS results
                  WHERE %(core_sample_time_field_name)s IS NOT NULL AND day>=DATE(%(core_sample_time_field_name)s)
                ),0) AS full_participants,
                day
                FROM calendar c
                WHERE c.day BETWEEN :start_date AND :end_date
            """ % {
                "filters_p": filters_sql_p,
                "core_sample_time_field_name": core_sample_time_field_name,
            }
        return sql
| |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating Ports via the DB API"""
from oslo_utils import uuidutils
from ironic.common import exception
from ironic.tests.unit.db import base
from ironic.tests.unit.db import utils as db_utils
class DbPortTestCase(base.DbTestCase):
    """Tests for manipulating Port records through the DB API."""

    def setUp(self):
        # Every test gets a node (with owner/lessee so the access-filter
        # tests have values to match), a portgroup on that node, and one
        # named port.  This fixture also covers the "create a port" happy
        # path, so only failure cases get dedicated create tests below.
        super(DbPortTestCase, self).setUp()
        self.node = db_utils.create_test_node(owner='12345',
                                              lessee='54321')
        self.portgroup = db_utils.create_test_portgroup(node_id=self.node.id)
        self.port = db_utils.create_test_port(node_id=self.node.id,
                                              portgroup_id=self.portgroup.id,
                                              name='port-name')

    def test_get_port_by_id(self):
        res = self.dbapi.get_port_by_id(self.port.id)
        self.assertEqual(self.port.address, res.address)

    def test_get_port_by_uuid(self):
        res = self.dbapi.get_port_by_uuid(self.port.uuid)
        self.assertEqual(self.port.id, res.id)

    def test_get_port_by_address(self):
        res = self.dbapi.get_port_by_address(self.port.address)
        self.assertEqual(self.port.id, res.id)

    def test_get_port_by_address_filter_by_owner(self):
        res = self.dbapi.get_port_by_address(self.port.address,
                                             owner=self.node.owner)
        self.assertEqual(self.port.id, res.id)

    def test_get_port_by_address_filter_by_owner_no_match(self):
        # '54321' is the node's lessee, not its owner, so the owner
        # filter must hide the port.
        self.assertRaises(exception.PortNotFound,
                          self.dbapi.get_port_by_address,
                          self.port.address,
                          owner='54321')

    def test_get_port_by_address_filter_by_project(self):
        res = self.dbapi.get_port_by_address(self.port.address,
                                             project=self.node.lessee)
        self.assertEqual(self.port.id, res.id)

    def test_get_port_by_address_filter_by_project_no_match(self):
        self.assertRaises(exception.PortNotFound,
                          self.dbapi.get_port_by_address,
                          self.port.address,
                          project='55555')

    def test_get_port_by_name(self):
        res = self.dbapi.get_port_by_name(self.port.name)
        self.assertEqual(self.port.id, res.id)

    def test_get_port_list(self):
        uuids = []
        for i in range(1, 6):
            port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                             node_id=self.node.id,
                                             address='52:54:00:cf:2d:4%s' % i)
            uuids.append(str(port.uuid))
        # Also add the uuid for the port created in setUp()
        uuids.append(str(self.port.uuid))
        res = self.dbapi.get_port_list()
        res_uuids = [r.uuid for r in res]
        self.assertCountEqual(uuids, res_uuids)

    def test_get_port_list_sorted(self):
        uuids = []
        for i in range(1, 6):
            port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                             node_id=self.node.id,
                                             address='52:54:00:cf:2d:4%s' % i)
            uuids.append(str(port.uuid))
        # Also add the uuid for the port created in setUp()
        uuids.append(str(self.port.uuid))
        res = self.dbapi.get_port_list(sort_key='uuid')
        res_uuids = [r.uuid for r in res]
        self.assertEqual(sorted(uuids), res_uuids)
        # An unknown sort key must be rejected.
        self.assertRaises(exception.InvalidParameterValue,
                          self.dbapi.get_port_list, sort_key='foo')

    def test_get_port_list_filter_by_node_owner(self):
        # Ports on another (ownerless) node must not be returned.
        another_node = db_utils.create_test_node(
            uuid=uuidutils.generate_uuid())
        uuids = []
        for i in range(1, 3):
            port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                             node_id=another_node.id,
                                             address='52:54:00:cf:2d:4%s' % i)
        for i in range(4, 6):
            port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                             node_id=self.node.id,
                                             address='52:54:00:cf:2d:4%s' % i)
            uuids.append(str(port.uuid))
        # Also add the uuid for the port created in setUp()
        uuids.append(str(self.port.uuid))
        res = self.dbapi.get_port_list(owner=self.node.owner)
        res_uuids = [r.uuid for r in res]
        self.assertCountEqual(uuids, res_uuids)

    def test_get_port_list_filter_by_node_project(self):
        another_node = db_utils.create_test_node(
            uuid=uuidutils.generate_uuid())
        # A node leased to this node's owner: its ports must match too,
        # since the project filter covers both owner and lessee.
        lessee_node = db_utils.create_test_node(uuid=uuidutils.generate_uuid(),
                                                lessee=self.node.owner)
        uuids = []
        for i in range(1, 3):
            port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                             node_id=lessee_node.id,
                                             address='52:54:00:cf:2d:4%s' % i)
            uuids.append(str(port.uuid))
        for i in range(4, 6):
            port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                             node_id=another_node.id,
                                             address='52:54:00:cf:2d:4%s' % i)
        for i in range(7, 9):
            port = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                             node_id=self.node.id,
                                             address='52:54:00:cf:2d:4%s' % i)
            uuids.append(str(port.uuid))
        # Also add the uuid for the port created in setUp()
        uuids.append(str(self.port.uuid))
        res = self.dbapi.get_port_list(project=self.node.owner)
        res_uuids = [r.uuid for r in res]
        self.assertCountEqual(uuids, res_uuids)

    def test_get_ports_by_node_id(self):
        res = self.dbapi.get_ports_by_node_id(self.node.id)
        self.assertEqual(self.port.address, res[0].address)

    def test_get_ports_by_node_id_filter_by_node_owner(self):
        res = self.dbapi.get_ports_by_node_id(self.node.id,
                                              owner=self.node.owner)
        self.assertEqual(self.port.address, res[0].address)

    def test_get_ports_by_node_id_filter_by_node_owner_no_match(self):
        res = self.dbapi.get_ports_by_node_id(self.node.id,
                                              owner='54321')
        self.assertEqual([], res)

    def test_get_ports_by_node_id_filter_by_node_project(self):
        res = self.dbapi.get_ports_by_node_id(self.node.id,
                                              project=self.node.lessee)
        self.assertEqual(self.port.address, res[0].address)

    def test_get_ports_by_node_id_filter_by_node_project_no_match(self):
        # BUG FIX: this test previously passed owner='11111' (copy/paste
        # from the owner variant), so the project filter was never
        # exercised; '11111' matches neither owner nor lessee.
        res = self.dbapi.get_ports_by_node_id(self.node.id,
                                              project='11111')
        self.assertEqual([], res)

    def test_get_ports_by_node_id_that_does_not_exist(self):
        self.assertEqual([], self.dbapi.get_ports_by_node_id(99))

    def test_get_ports_by_portgroup_id(self):
        res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id)
        self.assertEqual(self.port.address, res[0].address)

    def test_get_ports_by_portgroup_id_filter_by_node_owner(self):
        res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
                                                   owner=self.node.owner)
        self.assertEqual(self.port.address, res[0].address)

    def test_get_ports_by_portgroup_id_filter_by_node_owner_no_match(self):
        res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
                                                   owner='54321')
        self.assertEqual([], res)

    def test_get_ports_by_portgroup_id_filter_by_node_project(self):
        res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
                                                   project=self.node.lessee)
        self.assertEqual(self.port.address, res[0].address)

    def test_get_ports_by_portgroup_id_filter_by_node_project_no_match(self):
        res = self.dbapi.get_ports_by_portgroup_id(self.portgroup.id,
                                                   project='11111')
        self.assertEqual([], res)

    def test_get_ports_by_portgroup_id_that_does_not_exist(self):
        self.assertEqual([], self.dbapi.get_ports_by_portgroup_id(99))

    def test_destroy_port(self):
        self.dbapi.destroy_port(self.port.id)
        # Destroying the same id twice must fail.
        self.assertRaises(exception.PortNotFound,
                          self.dbapi.destroy_port, self.port.id)

    def test_update_port(self):
        old_address = self.port.address
        new_address = 'ff.ee.dd.cc.bb.aa'
        self.assertNotEqual(old_address, new_address)
        res = self.dbapi.update_port(self.port.id, {'address': new_address})
        self.assertEqual(new_address, res.address)

    def test_update_port_uuid(self):
        # The uuid is immutable once the port exists.
        self.assertRaises(exception.InvalidParameterValue,
                          self.dbapi.update_port, self.port.id,
                          {'uuid': ''})

    def test_update_port_duplicated_address(self):
        address1 = self.port.address
        address2 = 'aa-bb-cc-11-22-33'
        port2 = db_utils.create_test_port(uuid=uuidutils.generate_uuid(),
                                          node_id=self.node.id,
                                          address=address2)
        self.assertRaises(exception.MACAlreadyExists,
                          self.dbapi.update_port, port2.id,
                          {'address': address1})

    def test_create_port_duplicated_address(self):
        self.assertRaises(exception.MACAlreadyExists,
                          db_utils.create_test_port,
                          uuid=uuidutils.generate_uuid(),
                          node_id=self.node.id,
                          address=self.port.address)

    def test_create_port_duplicated_uuid(self):
        self.assertRaises(exception.PortAlreadyExists,
                          db_utils.create_test_port,
                          uuid=self.port.uuid,
                          node_id=self.node.id,
                          address='aa-bb-cc-33-11-22')
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DenseNet implementation with TPU support.
Original paper: (https://arxiv.org/abs/1608.06993)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import tensorflow as tf
import densenet_model
import vgg_preprocessing
from tensorflow.contrib.training.python.training import evaluation
# Command-line configuration and training constants for the DenseNet TPU job.
FLAGS = flags.FLAGS
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
    'tpu', default=None,
    help='The Cloud TPU to use for training. This should be either the name '
    'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
    "gcp_project", default=None,
    help="Project name for the Cloud TPU-enabled project. If not specified, we "
    "will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_string(
    "tpu_zone", default=None,
    help="GCE zone where the Cloud TPU is located in. If not specified, we "
    "will attempt to automatically detect the GCE project from metadata.")
# Model specific parameters
flags.DEFINE_string(
    "data_dir",
    default="",
    help="The directory where the ImageNet input data is stored.")
flags.DEFINE_string(
    "model_dir",
    default="",
    help="The directory where the model will be stored.")
flags.DEFINE_integer(
    "train_batch_size", default=1024, help="Batch size for training.")
flags.DEFINE_integer(
    "eval_batch_size", default=1024, help="Batch size for evaluation.")
flags.DEFINE_integer(
    "num_shards", default=8, help="Number of shards (TPU cores).")
flags.DEFINE_integer(
    "iterations_per_loop",
    default=None,
    help=("Number of interior TPU cycles to run before returning to the host. "
          "This is different from the number of steps run before each eval "
          "and should primarily be used only if you need more incremental "
          "logging during training. Setting this to None (default) will "
          "set the iterations_per_loop to be as large as possible (i.e. "
          "perform every call to train in a single TPU loop."))
flags.DEFINE_integer(
    "prefetch_dataset_buffer_size", 8 * 1024 * 1024,
    "Number of bytes prefetched in read buffer. 0 means no buffering.")
flags.DEFINE_integer("num_files_infeed", 8,
                     "Number of training files to read in parallel.")
flags.DEFINE_integer("shuffle_buffer_size", 1000,
                     "Size of the shuffle buffer used to randomize ordering")
# For mode=train and mode=train_and_eval
flags.DEFINE_integer(
    "steps_per_checkpoint",
    default=1000,
    help=("Controls how often checkpoints are generated. More steps per "
          "checkpoint = higher utilization of TPU and generally higher "
          "steps/sec"))
# For mode=eval
flags.DEFINE_integer(
    "min_eval_interval",
    default=180,
    help="Minimum seconds between evaluations.")
# For mode=eval
flags.DEFINE_integer(
    "eval_timeout",
    default=None,
    help="Maximum seconds between checkpoints before evaluation terminates.")
flags.DEFINE_integer(
    "network_depth",
    default=121,
    help="Number of levels in the Densenet network")
flags.DEFINE_integer(
    "train_steps",
    default=130000,  # Roughly 100 epochs at batch size 1024
    help="The number of steps to use for training.")
# For mode=train_and_eval, evaluation occurs at each steps_per_checkpoint
# Note: independently of steps_per_checkpoint, estimator will save the most
# recent checkpoint every 10 minutes by default for train_and_eval
flags.DEFINE_string(
    "mode",
    default="train_and_eval",
    help=("Mode to run: train, eval, train_and_eval "
          "(default, interleaved train & eval)."))
# Dataset constants (standard ImageNet; 1001 = 1000 classes + background)
_LABEL_CLASSES = 1001
_NUM_CHANNELS = 3
_NUM_TRAIN_IMAGES = 1281167
_NUM_EVAL_IMAGES = 50000
_MOMENTUM = 0.9
_WEIGHT_DECAY = 1e-4
# Learning hyperparameters; see learning_rate_schedule() below.
_BASE_LR = 0.1
_LR_SCHEDULE = [  # (LR multiplier, epoch to start)
    (1.0 / 6, 0), (2.0 / 6, 1), (3.0 / 6, 2), (4.0 / 6, 3), (5.0 / 6, 4),
    (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80), (0.0001, 90)
]
def learning_rate_schedule(current_epoch):
  """Handles linear scaling rule, gradual warmup, and LR decay.

  The base LR is linearly scaled by train_batch_size/256, then multiplied by
  the _LR_SCHEDULE entry whose start epoch has been reached; later entries
  override earlier ones, so warmup ramps up and the tail decays.

  Args:
    current_epoch: scalar float tensor holding the (fractional) epoch.

  Returns:
    Scalar float tensor with the learning rate for this epoch.
  """
  scaled = _BASE_LR * (FLAGS.train_batch_size / 256.0)
  rate = scaled
  # Each entry replaces the rate once its start epoch is reached; because the
  # entries are ordered, the last applicable multiplier wins.
  for multiplier, start in _LR_SCHEDULE:
    rate = tf.where(current_epoch < start, rate, scaled * multiplier)
  return rate
class ImageNetInput(object):
  """Wrapper class that acts as the input_fn to TPUEstimator.

  Instances are callable: TPUEstimator invokes them with a `params` dict
  (which carries the per-shard "batch_size") and receives a tf.data.Dataset
  of (image, one_hot_label) batches.
  """
  def __init__(self, is_training, data_dir=None):
    # Training mode controls shuffling/repeat and the preprocessing branch.
    self.is_training = is_training
    # Explicit data_dir wins; fall back to --data_dir; None => fake input.
    if data_dir:
      self.data_dir = data_dir
    elif FLAGS.data_dir:
      self.data_dir = FLAGS.data_dir
    else:
      self.data_dir = None
  def dataset_parser(self, value):
    """Parse an Imagenet record from value."""
    # Feature spec for the standard ImageNet TFRecord layout; the bbox and
    # text fields are parsed but unused below.
    keys_to_features = {
        "image/encoded": tf.FixedLenFeature((), tf.string, ""),
        "image/format": tf.FixedLenFeature((), tf.string, "jpeg"),
        "image/class/label": tf.FixedLenFeature([], tf.int64, -1),
        "image/class/text": tf.FixedLenFeature([], tf.string, ""),
        "image/object/bbox/xmin": tf.VarLenFeature(dtype=tf.float32),
        "image/object/bbox/ymin": tf.VarLenFeature(dtype=tf.float32),
        "image/object/bbox/xmax": tf.VarLenFeature(dtype=tf.float32),
        "image/object/bbox/ymax": tf.VarLenFeature(dtype=tf.float32),
        "image/object/class/label": tf.VarLenFeature(dtype=tf.int64),
    }
    parsed = tf.parse_single_example(value, keys_to_features)
    image = tf.image.decode_image(
        tf.reshape(parsed["image/encoded"], shape=[]), _NUM_CHANNELS)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # TODO(shivaniagrawal): height and width of image from model
    image = vgg_preprocessing.preprocess_image(
        image=image,
        output_height=224,
        output_width=224,
        is_training=self.is_training)
    label = tf.cast(
        tf.reshape(parsed["image/class/label"], shape=[]), dtype=tf.int32)
    return image, tf.one_hot(label, _LABEL_CLASSES)
  def __call__(self, params):
    """Input function which provides a single batch for train or eval."""
    # No data directory configured => serve synthetic black images instead.
    if self.data_dir is None:
      tf.logging.info('Using fake input.')
      return self._input_fn_null(params)
    # Retrieves the batch size for the current shard. The # of shards is
    # computed according to the input pipeline deployment. See
    # `tf.contrib.tpu.RunConfig` for details.
    batch_size = params["batch_size"]
    # Shuffle the filenames to ensure better randomization
    file_pattern = os.path.join(self.data_dir, "train-*"
                                if self.is_training else "validation-*")
    dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
    if self.is_training:
      dataset = dataset.shuffle(buffer_size=1024)  # 1024 files in dataset
    if self.is_training:
      # Repeat forever; training length is bounded by train steps, not epochs.
      dataset = dataset.repeat()
    def prefetch_dataset(filename):
      # One TFRecordDataset per file, with a large read-ahead buffer.
      buffer_size = FLAGS.prefetch_dataset_buffer_size
      dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
      return dataset
    # Interleave reads across files; sloppy=True trades determinism for speed.
    dataset = dataset.apply(
        tf.contrib.data.parallel_interleave(
            prefetch_dataset, cycle_length=FLAGS.num_files_infeed, sloppy=True))
    dataset = dataset.shuffle(FLAGS.shuffle_buffer_size)
    dataset = dataset.map(self.dataset_parser, num_parallel_calls=128)
    dataset = dataset.prefetch(batch_size)
    # drop_remainder=True: TPU requires fully static batch shapes.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(2)  # Prefetch overlaps in-feed with training
    return dataset
  def _input_fn_null(self, params):
    """Input function which provides null (black) images."""
    batch_size = params["batch_size"]
    null_image = tf.zeros([224, 224, 3], tf.float32)
    null_label = tf.one_hot(tf.constant(0, tf.int32), _LABEL_CLASSES)
    dataset = tf.data.Dataset.from_tensors((null_image, null_label))
    dataset = dataset.repeat(batch_size).batch(batch_size, drop_remainder=True)
    # One cached batch replayed forever -- zero host-side input cost.
    dataset = dataset.take(1).cache().repeat()
    tf.logging.info("Input dataset: %s", str(dataset))
    return dataset
def model_fn(features, labels, mode, params):
  """Our model_fn for Densenet to be used with our Estimator.

  Args:
    features: batch of input images.
    labels: batch of one-hot labels (used as onehot_labels below).
    mode: a tf.estimator.ModeKeys value.
    params: dict with "batches_per_epoch" plus the TPU-injected "batch_size".

  Returns:
    A tf.contrib.tpu.TPUEstimatorSpec for the requested mode.
  """
  tf.logging.info("model_fn")
  # Pick the DenseNet variant from --network_depth; unsupported depths fall
  # back to DenseNet-121.
  if FLAGS.network_depth == 169:
    logits = densenet_model.densenet_imagenet_169(
        features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
  elif FLAGS.network_depth == 201:
    logits = densenet_model.densenet_imagenet_201(
        features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
  elif FLAGS.network_depth == 121:
    logits = densenet_model.densenet_imagenet_121(
        features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
  else:
    tf.logging.info("Number of layers not supported, revert to 121")
    logits = densenet_model.densenet_imagenet_121(
        features, is_training=(mode == tf.estimator.ModeKeys.TRAIN))
  # Calculate loss, which includes softmax cross entropy and L2 regularization.
  cross_entropy = tf.losses.softmax_cross_entropy(
      logits=logits, onehot_labels=labels)
  # Add weight decay to the loss. We exclude weight decay on the batch
  # normalization variables because it slightly improves accuracy.
  loss = cross_entropy + _WEIGHT_DECAY * tf.add_n([
      tf.nn.l2_loss(v)
      for v in tf.trainable_variables()
      if "batch_normalization" not in v.name
  ])
  global_step = tf.train.get_global_step()
  current_epoch = (
      tf.cast(global_step, tf.float32) / params["batches_per_epoch"])
  learning_rate = learning_rate_schedule(current_epoch)
  # TODO(chrisying): this is a hack to get the LR and epoch for Tensorboard.
  # Reimplement this when TPU training summaries are supported.
  # The scalar LR/epoch are tiled to [batch_size, 1] so they survive the
  # per-example transpose into metric_fn below.
  lr_repeat = tf.reshape(
      tf.tile(tf.expand_dims(learning_rate, 0), [
          params["batch_size"],
      ]), [params["batch_size"], 1])
  ce_repeat = tf.reshape(
      tf.tile(tf.expand_dims(current_epoch, 0), [
          params["batch_size"],
      ]), [params["batch_size"], 1])
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate, momentum=_MOMENTUM)
    # Aggregate gradients across TPU shards.
    optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    # Batch norm requires update_ops to be added as a train_op dependency.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss, global_step)
  else:
    train_op = None
  eval_metrics = None
  if mode == tf.estimator.ModeKeys.EVAL:
    def metric_fn(labels, logits, lr_repeat, ce_repeat):
      """Evaluation metric fn. Performed on CPU, do not reference TPU ops."""
      predictions = tf.argmax(logits, axis=1)
      accuracy = tf.metrics.accuracy(tf.argmax(labels, axis=1), predictions)
      # Mean over the tiled per-example copies recovers the scalar value.
      lr = tf.metrics.mean(lr_repeat)
      ce = tf.metrics.mean(ce_repeat)
      return {"accuracy": accuracy, "learning_rate": lr, "current_epoch": ce}
    eval_metrics = (metric_fn, [labels, logits, lr_repeat, ce_repeat])
  return tf.contrib.tpu.TPUEstimatorSpec(
      mode=mode, loss=loss, train_op=train_op, eval_metrics=eval_metrics)
def main(unused_argv):
  """Builds the TPUEstimator and dispatches on --mode.

  Modes:
    train          -- train for --train_steps.
    train_and_eval -- alternate train/eval every --steps_per_checkpoint.
    anything else  -- continuously evaluate new checkpoints in --model_dir.
  """
  tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
      FLAGS.tpu,
      zone=FLAGS.tpu_zone,
      project=FLAGS.gcp_project)
  batches_per_epoch = _NUM_TRAIN_IMAGES / FLAGS.train_batch_size
  steps_per_checkpoint = FLAGS.steps_per_checkpoint
  iterations_per_loop = FLAGS.iterations_per_loop
  eval_steps = _NUM_EVAL_IMAGES // FLAGS.eval_batch_size
  # Default/clamp the TPU loop length so a checkpoint boundary is never
  # crossed inside a single device loop.
  if iterations_per_loop is None or steps_per_checkpoint < iterations_per_loop:
    iterations_per_loop = steps_per_checkpoint
  if FLAGS.mode == "eval":
    iterations_per_loop = eval_steps
  params = {
      "batches_per_epoch": batches_per_epoch,
  }
  config = tf.contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=FLAGS.model_dir,
      save_checkpoints_steps=steps_per_checkpoint,
      log_step_count_steps=iterations_per_loop,
      tpu_config=tf.contrib.tpu.TPUConfig(
          iterations_per_loop=iterations_per_loop, num_shards=FLAGS.num_shards))
  densenet_estimator = tf.contrib.tpu.TPUEstimator(
      model_fn=model_fn,
      config=config,
      train_batch_size=FLAGS.train_batch_size,
      eval_batch_size=FLAGS.eval_batch_size,
      params=params)
  if FLAGS.mode == "train":
    tf.logging.info("Training for %d steps (%.2f epochs in total)." %
                    (FLAGS.train_steps, FLAGS.train_steps / batches_per_epoch))
    densenet_estimator.train(
        input_fn=ImageNetInput(True), max_steps=FLAGS.train_steps)
  elif FLAGS.mode == "train_and_eval":
    current_step = 0
    tf.logging.info("Training for %d steps (%.2f epochs in total). Current "
                    "step %d" %
                    (FLAGS.train_steps, FLAGS.train_steps / batches_per_epoch,
                     current_step))
    while current_step < FLAGS.train_steps:
      # Train up to the next checkpoint boundary, then run one evaluation.
      next_checkpoint = min(current_step + steps_per_checkpoint,
                            FLAGS.train_steps)
      num_steps = next_checkpoint - current_step
      current_step = next_checkpoint
      densenet_estimator.train(input_fn=ImageNetInput(True), steps=num_steps)
      tf.logging.info("Starting to evaluate.")
      # eval_steps is _NUM_EVAL_IMAGES // FLAGS.eval_batch_size, computed
      # above; reused here for consistency with the eval-only branch.
      eval_results = densenet_estimator.evaluate(
          input_fn=ImageNetInput(False),
          steps=eval_steps)
      tf.logging.info("Eval results: %s" % eval_results)
  else:
    def terminate_eval():
      # timeout_fn for checkpoints_iterator: returning True stops waiting.
      tf.logging.info("Terminating eval after %d seconds of no checkpoints" %
                      FLAGS.eval_timeout)
      return True
    # Run evaluation whenever there's a new checkpoint.  If the evaluation
    # worker is delayed and the trainer deletes a checkpoint before it can
    # be evaluated, ignore the resulting NotFoundError and move on.
    for ckpt in evaluation.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval,
        timeout=FLAGS.eval_timeout,
        timeout_fn=terminate_eval):
      tf.logging.info("Starting to evaluate.")
      try:
        eval_results = densenet_estimator.evaluate(
            input_fn=ImageNetInput(False),
            steps=eval_steps,
            checkpoint_path=ckpt)
        tf.logging.info("Eval results: %s" % eval_results)
      except tf.errors.NotFoundError:
        # BUG FIX: the message contained a %s placeholder but no argument,
        # so the literal "%s" was logged; pass the checkpoint path.
        tf.logging.info("Checkpoint %s no longer exists, skipping checkpoint",
                        ckpt)
if __name__ == "__main__":
  # Surface INFO-level progress logs and hand argv parsing to absl.
  tf.logging.set_verbosity(tf.logging.INFO)
  app.run(main)
| |
import mock
import random
import libvirt
import difflib
import unittest
import itertools
import ipaddress
from see.context.resources import network
def compare(text1, text2):
    """Return a human-readable ndiff between the string forms of two values.

    The result starts with a newline so it reads cleanly when used as an
    assertion failure message.
    """
    left = str(text1).splitlines(True)
    right = str(text2).splitlines(True)
    delta = difflib.ndiff(left, right)
    return '\n' + '\n'.join(delta)
class NetworkXMLTest(unittest.TestCase):
    """Tests for network.network_xml(): it injects name/uuid/bridge elements
    (and optionally a generated <ip> block) into a libvirt network template.
    NOTE(review): the expected strings compare serialized XML byte-for-byte,
    so whitespace inside the literals is significant."""
    def test_ip(self):
        """NETWORK XML with given IP."""
        config = """<network>
            <forward mode="nat"/>
            <ip address="192.168.235.1" netmask="255.255.255.0">
                <dhcp>
                    <range start="192.168.235.2" end="192.168.235.128"/>
                </dhcp>
            </ip>
        </network>
        """
        # name/uuid/bridge are appended; the supplied <ip> block is kept.
        expected = """<network>
            <forward mode="nat" />
            <ip address="192.168.235.1" netmask="255.255.255.0">
                <dhcp>
                    <range end="192.168.235.128" start="192.168.235.2" />
                </dhcp>
            </ip>
        <name>foo</name><uuid>foo</uuid><bridge name="virbr-foo" /></network>"""
        results = network.network_xml('foo', config)
        self.assertEqual(results, expected, compare(results, expected))
    def test_ip_modifies(self):
        """NETWORK Name and UUID are modified if existing."""
        config = """<network>
            <name>bar</name>
            <uuid>bar</uuid>
            <bridge name="virbr-bar"/>
            <forward mode="nat"/>
            <ip address="192.168.235.1" netmask="255.255.255.0">
                <dhcp>
                    <range start="192.168.235.2" end="192.168.235.128"/>
                </dhcp>
            </ip>
        </network>
        """
        # Pre-existing name/uuid/bridge are overwritten in place ("bar"->"foo").
        expected = """<network>
            <name>foo</name>
            <uuid>foo</uuid>
            <bridge name="virbr-foo" />
            <forward mode="nat" />
            <ip address="192.168.235.1" netmask="255.255.255.0">
                <dhcp>
                    <range end="192.168.235.128" start="192.168.235.2" />
                </dhcp>
            </ip>
        </network>"""
        results = network.network_xml('foo', config)
        self.assertEqual(results, expected, compare(results, expected))
    def test_ip_address(self):
        """NETWORK RuntimeError is raised if both address and <ip> are specified."""
        config = """<network>
            <forward mode="nat"/>
            <ip address="192.168.235.1" netmask="255.255.255.0">
                <dhcp>
                    <range start="192.168.235.2" end="192.168.235.128"/>
                </dhcp>
            </ip>
        </network>
        """
        with self.assertRaises(RuntimeError):
            network.network_xml('foo', config, address=True)
    def test_no_ip_address(self):
        """NETWORK XML with address."""
        config = """<network>
            <forward mode="nat"/>
        </network>
        """
        # With an address and no <ip> in the template, a full <ip>/<dhcp>
        # block is generated from the given subnet.
        expected = """<network>
            <forward mode="nat" />
        <name>foo</name><uuid>foo</uuid><bridge name="virbr-foo" />""" + \
                   """<ip address="192.168.1.1" netmask="255.255.255.0">""" + \
                   """<dhcp><range end="192.168.1.254" start="192.168.1.2" />""" +\
                   """</dhcp></ip></network>"""
        address = ipaddress.IPv4Network(u'192.168.1.0/24')
        results = network.network_xml('foo', config, address=address)
        self.assertEqual(results, expected, compare(results, expected))
class ValidAddressTest(unittest.TestCase):
    """Tests for network.generate_address()."""
    @staticmethod
    def _hypervisor(octet_source, networks=('foo', 'bar', 'baz')):
        """Build a mock hypervisor whose networks each report the address
        192.168.<octet_source()>.1/24 in their XML description."""
        virnetwork = mock.Mock()
        virnetwork.XMLDesc.side_effect = (
            lambda x:
            '<a><ip address="192.168.%s.1" netmask="255.255.255.0"/></a>'
            % octet_source())
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = networks
        hypervisor.networkLookupByName.return_value = virnetwork
        return hypervisor
    def test_valid(self):
        """NETWORK A valid address is retrieved."""
        hypervisor = self._hypervisor(lambda: random.randint(1, 255))
        configuration = {'ipv4': '192.168.0.0',
                         'prefix': 16,
                         'subnet_prefix': 24}
        candidates = [ipaddress.IPv4Network(u'192.168.{}.0/24'.format(i))
                      for i in range(1, 255)]
        self.assertTrue(
            network.generate_address(hypervisor, configuration) in candidates)
    def test_randomised(self):
        """NETWORK Address generation is randomised."""
        hypervisor = self._hypervisor(lambda: random.randint(1, 255))
        configuration = {'ipv4': '192.168.0.0',
                         'prefix': 16,
                         'subnet_prefix': 24}
        generated = {network.generate_address(hypervisor, configuration)
                     for _ in range(10)}
        self.assertTrue(len(generated) > 1)
    def test_invalid(self):
        """NETWORK ValueError is raised if configuration address is invalid."""
        hypervisor = self._hypervisor(lambda: random.randint(1, 255))
        # '192.168.0.1' has host bits set for a /16, so it is not a network.
        configuration = {'ipv4': '192.168.0.1',
                         'prefix': 16,
                         'subnet_prefix': 24}
        with self.assertRaises(ValueError):
            network.generate_address(hypervisor, configuration)
    def test_no_ip(self):
        """NETWORK RuntimeError is raised if all IPs are taken."""
        # 256 existing networks claim every /24 subnet: 0 through 255.
        counter = itertools.count()
        hypervisor = self._hypervisor(lambda: next(counter),
                                      networks=range(0, 256))
        configuration = {'ipv4': '192.168.0.0',
                         'prefix': 16,
                         'subnet_prefix': 24}
        with self.assertRaises(RuntimeError):
            network.generate_address(hypervisor, configuration)
class CreateTest(unittest.TestCase):
    """Tests for network.create()/network.delete()."""

    def test_create_too_many_attempts(self):
        """NETWORK RuntimeError is raised if too many fails to create a network."""
        xml = '<network><forward mode="nat"/></network>'
        network.MAX_ATTEMPTS = 3
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        hypervisor.networkCreateXML.side_effect = libvirt.libvirtError('BOOM')
        configuration = {'configuration': 'bar',
                         'dynamic_address': {'ipv4': '10.0.0.0',
                                             'prefix': 16,
                                             'subnet_prefix': 24}}
        with mock.patch('see.context.resources.network.open',
                        mock.mock_open(read_data=xml), create=True):
            try:
                network.create(hypervisor, 'foo', configuration)
            except RuntimeError as error:
                self.assertEqual(
                    error.args,
                    ("Exceeded failed attempts (3) to get IP address.",
                     "Last error: BOOM"))
            else:
                # FIX: the original silently passed when no exception was
                # raised at all; fail explicitly in that case.
                self.fail("RuntimeError was not raised")

    def test_create_xml(self):
        """NETWORK Provided XML is used."""
        xml = """<network><forward mode="nat"/><ip address="192.168.1.1" netmask="255.255.255.0">""" + \
              """<dhcp><range end="192.168.1.128" start="192.168.1.2"/></dhcp></ip></network>"""
        expected = """<network><forward mode="nat" /><ip address="192.168.1.1" netmask="255.255.255.0">""" + \
                   """<dhcp><range end="192.168.1.128" start="192.168.1.2" /></dhcp></ip>""" + \
                   """<name>foo</name><uuid>foo</uuid><bridge name="virbr-foo" /></network>"""
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        with mock.patch('see.context.resources.network.open', mock.mock_open(read_data=xml), create=True):
            network.create(hypervisor, 'foo', {'configuration': '/foo'})
        results = hypervisor.networkCreateXML.call_args_list[0][0][0]
        self.assertEqual(results, expected, compare(results, expected))

    def test_create_no_xml_file(self):
        """NETWORK Default XML is used if none is provided."""
        expected = """<forward mode="nat" />"""
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        network.create(hypervisor, 'foo', {'dynamic_address':
                                           {'ipv4': '192.168.0.0',
                                            'prefix': 16,
                                            'subnet_prefix': 24}})
        results = hypervisor.networkCreateXML.call_args_list[0][0][0]
        self.assertTrue(expected in results, compare(results, expected))

    def test_create_xml_error(self):
        """NETWORK RuntimeError is raised in case of creation error."""
        xml = """<network><forward mode="nat"/><ip address="192.168.1.1" netmask="255.255.255.0">""" + \
              """<dhcp><range end="192.168.1.128" start="192.168.1.2"/></dhcp></ip></network>"""
        hypervisor = mock.Mock()
        hypervisor.listNetworks.return_value = []
        hypervisor.networkCreateXML.side_effect = libvirt.libvirtError('BOOM')
        with mock.patch('see.context.resources.network.open', mock.mock_open(read_data=xml), create=True):
            with self.assertRaises(RuntimeError) as context:
                network.create(hypervisor, 'foo', {'configuration': '/foo'})
        # FIX: compare against the raised exception itself; str() of the
        # assertRaises context manager object is not the error message.
        self.assertEqual(str(context.exception),
                         "Unable to create new network: BOOM.")

    def test_create_empty_config(self):
        """NETWORK RuntimeError raised if empty configuration."""
        hypervisor = mock.Mock()
        with self.assertRaises(RuntimeError):
            network.create(hypervisor, 'foo', {})

    def test_delete(self):
        """NETWORK Network is destroyed on delete()."""
        net = mock.Mock()
        network.delete(net)
        self.assertTrue(net.destroy.called)
class LookupTest(unittest.TestCase):
    """Tests for network.lookup()."""

    def test_lookup(self):
        """NETWORK Network lookup passes correct parameters to hypervisor."""
        hypervisor = mock.Mock()
        domain = mock.Mock()
        domain.XMLDesc.return_value = (
            """<domain><interface type="network">"""
            """<source network="foo" /></interface></domain>""")
        domain.connect.return_value = hypervisor
        network.lookup(domain)
        hypervisor.networkLookupByName.assert_called_with('foo')

    def test_lookup_no_network(self):
        """NETWORK None is return if domain is not associated with any Network."""
        domain = mock.Mock()
        domain.XMLDesc.return_value = """<domain></domain>"""
        domain.connect.return_value = mock.Mock()
        self.assertIsNone(network.lookup(domain))
| |
##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
import math
class TriangulatorTest( unittest.TestCase ) :
	"""Exercises V3fTriangulator / MeshPrimitiveBuilder on a variety of
	polygons: concave outlines, outlines with holes, repeated calls and
	colinear vertices."""

	def testV3f( self ) :
		"""A single concave outline triangulates into len(p) - 2 triangles."""
		# _ _
		# | |_| |
		# |_ |
		# _| |
		# |____|
		p = V3fVectorData(
			[
				V3f( 0, 0, 0 ),
				V3f( 3, 0, 0 ),
				V3f( 3, 4, 0 ),
				V3f( 2, 4, 0 ),
				V3f( 2, 3, 0 ),
				V3f( 1, 3, 0 ),
				V3f( 1, 4, 0 ),
				V3f( 0, 4, 0 ),
				V3f( 0, 2, 0 ),
				V3f( 1, 2, 0 ),
				V3f( 1, 1, 0 ),
				V3f( 0, 1, 0 )
			]
		)
		# NOTE: an unused MeshPrimitive (inMesh) was built here in the
		# original; removed as the triangulator consumes the raw points.
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( p )
		outMesh = builder.mesh()
		# The builder tags its "P" with a Point interpretation, so the raw
		# data differs until we set the same interpretation on a copy.
		self.assertNotEqual( p, outMesh["P"].data )
		pp = p.copy()
		pp.setInterpretation( GeometricData.Interpretation.Point )
		self.assertEqual( pp, outMesh["P"].data )
		self.assertEqual( len( outMesh.verticesPerFace ), len( p ) - 2 )
		for x in outMesh.verticesPerFace :
			self.assertEqual( x, 3 )
		self.assertEqual( outMesh.variableSize( PrimitiveVariable.Interpolation.Vertex ), 12 )
		e = PrimitiveEvaluator.create( outMesh )
		self.assertEqual( e.surfaceArea(), 10 )

	def testOneHole( self ) :
		"""An outline with one hole keeps the hole area out of the mesh."""
		# ______
		# | __ |
		# | |_| |
		# |_____|
		#
		outer = V3fVectorData(
			[
				V3f( 0, 0, 0 ),
				V3f( 3, 0, 0 ),
				V3f( 3, 3, 0 ),
				V3f( 0, 3, 0 ),
			]
		)
		inner = V3fVectorData(
			[
				V3f( 1, 1, 0 ),
				V3f( 1, 2, 0 ),
				V3f( 2, 2, 0 ),
				V3f( 2, 1, 0 ),
			]
		)
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( [ outer, inner ] )
		outMesh = builder.mesh()
		e = PrimitiveEvaluator.create( outMesh )
		# 3x3 outer minus 1x1 hole.
		self.assertEqual( e.surfaceArea(), 8 )

	def testTwoHoles( self ) :
		"""An outline with two holes."""
		# __________
		# | __ __ |
		# | |_| |_| |
		# |__________|
		#
		outer = V3fVectorData(
			[
				V3f( 0, 0, 0 ),
				V3f( 5, 0, 0 ),
				V3f( 5, 3, 0 ),
				V3f( 0, 3, 0 ),
			]
		)
		inner1 = V3fVectorData(
			[
				V3f( 1, 1, 0 ),
				V3f( 1, 2, 0 ),
				V3f( 2, 2, 0 ),
				V3f( 2, 1, 0 ),
			]
		)
		inner2 = V3fVectorData(
			[
				V3f( 3, 1, 0 ),
				V3f( 3, 2, 0 ),
				V3f( 4, 2, 0 ),
				V3f( 4, 1, 0 ),
			]
		)
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( [ outer, inner1, inner2 ] )
		outMesh = builder.mesh()
		e = PrimitiveEvaluator.create( outMesh )
		# 5x3 outer minus two 1x1 holes.
		self.assertEqual( e.surfaceArea(), 13 )

	def testBigCircle( self ) :
		"""A large near-degenerate loop triangulates without error."""
		numPoints = 10000
		loop = V3fVectorData( numPoints )
		for i in range( 0, numPoints ) :
			t = i * math.pi * 2 / numPoints
			loop[i] = V3f( math.cos( t ), math.sin( t ), 0 )
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( loop )

	def testMultipleCalls( self ) :
		"""Successive triangulate() calls accumulate into one mesh."""
		# __ __
		# |_| |_|
		#
		outline1 = V3fVectorData(
			[
				V3f( 0, 0, 0 ),
				V3f( 1, 0, 0 ),
				V3f( 1, 1, 0 ),
				V3f( 0, 1, 0 ),
			]
		)
		outline2 = V3fVectorData(
			[
				V3f( 3, 0, 0 ),
				V3f( 4, 0, 0 ),
				V3f( 4, 1, 0 ),
				V3f( 3, 1, 0 ),
			]
		)
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( outline1 )
		triangulator.triangulate( outline2 )
		outMesh = builder.mesh()
		self.assertEqual( outMesh["P"].data.size(), 8 )
		self.assertEqual( outMesh.verticesPerFace, IntVectorData( [ 3, 3, 3, 3 ] ) )
		self.assertEqual( outMesh.variableSize( PrimitiveVariable.Interpolation.Vertex ), 8 )
		self.assertEqual( outMesh.bound(), Box3f( V3f( 0 ), V3f( 4, 1, 0 ) ) )
		e = PrimitiveEvaluator.create( outMesh )
		self.assertEqual( e.surfaceArea(), 2 )

	def testMultipleCallsWithHoles( self ) :
		"""Successive calls with holes accumulate correctly too."""
		# ______ ______
		# | __ | | __ |
		# | |_| | | |_| |
		# |_____| |_____|
		#
		outer1 = V3fVectorData(
			[
				V3f( 0, 0, 0 ),
				V3f( 3, 0, 0 ),
				V3f( 3, 3, 0 ),
				V3f( 0, 3, 0 ),
			]
		)
		inner1 = V3fVectorData(
			[
				V3f( 1, 1, 0 ),
				V3f( 1, 2, 0 ),
				V3f( 2, 2, 0 ),
				V3f( 2, 1, 0 ),
			]
		)
		outer2 = V3fVectorData(
			[
				V3f( 4, 0, 0 ),
				V3f( 7, 0, 0 ),
				V3f( 7, 3, 0 ),
				V3f( 4, 3, 0 ),
			]
		)
		inner2 = V3fVectorData(
			[
				V3f( 5, 1, 0 ),
				V3f( 5, 2, 0 ),
				V3f( 6, 2, 0 ),
				V3f( 6, 1, 0 ),
			]
		)
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( [ outer1, inner1 ] )
		triangulator.triangulate( [ outer2, inner2 ] )
		outMesh = builder.mesh()
		self.assertEqual( outMesh["P"].data.size(), 16 )
		self.assertEqual( outMesh.variableSize( PrimitiveVariable.Interpolation.Vertex ), 16 )
		self.assertEqual( outMesh.bound(), Box3f( V3f( 0 ), V3f( 7, 3, 0 ) ) )
		e = PrimitiveEvaluator.create( outMesh )
		self.assertEqual( e.surfaceArea(), 16 )

	def testRightAlignedHoles( self ) :
		"""Holes vertically aligned with each other."""
		# ______
		# | __ |
		# | |_| |
		# | __ |
		# | |_| |
		# |_____|
		outer = V3fVectorData(
			[
				V3f( 0, 0, 0 ),
				V3f( 3, 0, 0 ),
				V3f( 3, 5, 0 ),
				V3f( 0, 5, 0 ),
			]
		)
		inner1 = V3fVectorData(
			[
				V3f( 1, 1, 0 ),
				V3f( 1, 2, 0 ),
				V3f( 2, 2, 0 ),
				V3f( 2, 1, 0 ),
			]
		)
		inner2 = V3fVectorData(
			[
				V3f( 1, 3, 0 ),
				V3f( 1, 4, 0 ),
				V3f( 2, 4, 0 ),
				V3f( 2, 3, 0 ),
			]
		)
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( [ outer, inner1, inner2 ] )
		outMesh = builder.mesh()
		e = PrimitiveEvaluator.create( outMesh )
		self.assertEqual( e.surfaceArea(), 13 )

	def testColinearities( self ) :
		"""Colinear boundary vertices must not yield degenerate triangles."""
		# _._
		# | |
		# . .
		# | |
		# . .
		# |_._|
		#
		outer = V3fVectorData(
			[
				V3f( 1, 1, 0 ),
				V3f( 1, 2, 0 ),
				V3f( 1, 3, 0 ),
				V3f( 0.5, 3, 0 ),
				V3f( 0, 3, 0 ),
				V3f( 0, 2, 0 ),
				V3f( 0, 1, 0 ),
				V3f( 0, 0, 0 ),
				V3f( 0.5, 0, 0 ),
				V3f( 1, 0, 0 ),
			]
		)
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( outer )
		outMesh = builder.mesh()
		e = PrimitiveEvaluator.create( outMesh )
		self.assertEqual( e.surfaceArea(), 3 )
		p = outMesh["P"].data
		self.assertEqual( p.size(), 10 )
		vertexIds = outMesh.vertexIds
		for i in range( 0, 10 ) :
			self.assertTrue( i in vertexIds )
		self.assertEqual( outMesh.verticesPerFace, IntVectorData( [ 3 ] * 8 ) )
		# Check every one of the 8 triangles (3 indices apiece) has positive
		# area. FIX: the original never advanced `i`, so it tested only the
		# first triangle 8 times; also replaced deprecated assert_().
		for j in range( 0, 8 ) :
			i = j * 3
			p0 = p[vertexIds[i]]
			p1 = p[vertexIds[i+1]]
			p2 = p[vertexIds[i+2]]
			self.assertTrue( triangleArea( p0, p1, p2 ) > 0 )

	def testHoleAlignedWithVertex( self ) :
		"""A hole horizontally aligned with a concave outline vertex."""
		# ______
		# | __ |
		# | |_| |
		# |___ |
		# |__|
		#
		outer = V3fVectorData(
			[
				V3f( 0, 0, 0 ),
				V3f( 2, 0, 0 ),
				V3f( 2, -1, 0 ),
				V3f( 3, -1, 0 ),
				V3f( 3, 3, 0 ),
				V3f( 0, 3, 0 ),
			]
		)
		inner = V3fVectorData(
			[
				V3f( 1, 1, 0 ),
				V3f( 1, 2, 0 ),
				V3f( 2, 2, 0 ),
				V3f( 2, 1, 0 ),
			]
		)
		builder = MeshPrimitiveBuilder()
		triangulator = V3fTriangulator( builder )
		triangulator.triangulate( [ outer, inner ] )
		outMesh = builder.mesh()
		e = PrimitiveEvaluator.create( outMesh )
		self.assertEqual( e.surfaceArea(), 9 )
# Allow running the test file directly.
if __name__ == "__main__":
	unittest.main()
| |
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# revised by cotyb in 2016/1/30
# details: http://www.cnblogs.com/cotyb/p/5067844.html
import logging
import six
import struct
import time
import json
from ryu import cfg
from ryu.topology import event
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.exception import RyuException
from ryu.lib import addrconv, hub
from ryu.lib.mac import DONTCARE_STR
from ryu.lib.dpid import dpid_to_str, str_to_dpid
from ryu.lib.port_no import port_no_to_str
from ryu.lib.packet import packet, ethernet
from ryu.lib.packet import lldp, ether_types
from ryu.lib.packet import arp, ipv4, ipv6
from ryu.ofproto.ether import ETH_TYPE_LLDP
from ryu.ofproto import nx_match
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_4
# Module logger and command-line options controlling topology discovery.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_cli_opts([
    cfg.BoolOpt('observe-links', default=False,
                help='observe link discovery events.'),
    cfg.BoolOpt('install-lldp-flow', default=True,
                help='link discovery: explicitly install flow entry '
                     'to send lldp packet to controller'),
    cfg.BoolOpt('explicit-drop', default=True,
                help='link discovery: explicitly drop lldp packet in')
])
class Port(object):
    # Data class describing one switch port; carried by EventPortXXX.
    def __init__(self, dpid, ofproto, ofpport):
        super(Port, self).__init__()
        self.dpid = dpid
        self._ofproto = ofproto
        self._config = ofpport.config
        self._state = ofpport.state
        self.port_no = ofpport.port_no
        self.hw_addr = ofpport.hw_addr
        self.name = ofpport.name

    def is_reserved(self):
        # OpenFlow reserves port numbers above OFPP_MAX.
        return self.port_no > self._ofproto.OFPP_MAX

    def is_down(self):
        link_down = self._state & self._ofproto.OFPPS_LINK_DOWN
        admin_down = self._config & self._ofproto.OFPPC_PORT_DOWN
        return link_down > 0 or admin_down > 0

    def is_live(self):
        # NOTE: OF1.2 has OFPPS_LIVE state
        # return (self._state & self._ofproto.OFPPS_LIVE) > 0
        return not self.is_down()

    def to_dict(self):
        return {
            'dpid': dpid_to_str(self.dpid),
            'port_no': port_no_to_str(self.port_no),
            'hw_addr': self.hw_addr,
            'name': self.name.rstrip('\0'),
        }

    # for Switch.del_port()
    def __eq__(self, other):
        return (self.dpid, self.port_no) == (other.dpid, other.port_no)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.dpid, self.port_no))

    def __str__(self):
        status = 'LIVE' if self.is_live() else 'DOWN'
        return 'Port<dpid=%s, port_no=%s, %s>' % (
            self.dpid, self.port_no, status)
class Switch(object):
    # This is data class passed by EventSwitchXXX
    def __init__(self, dp):
        super(Switch, self).__init__()
        self.dp = dp
        self.ports = []

    def add_port(self, ofpport):
        """Wrap *ofpport* in a Port and track it unless it is reserved."""
        port = Port(self.dp.id, self.dp.ofproto, ofpport)
        if not port.is_reserved():
            self.ports.append(port)

    def del_port(self, ofpport):
        """Forget the Port corresponding to *ofpport*.

        FIX: Port() requires (dpid, ofproto, ofpport); the original called
        Port(ofpport), which raised TypeError on every invocation.
        """
        self.ports.remove(Port(self.dp.id, self.dp.ofproto, ofpport))

    def to_dict(self):
        d = {'dpid': dpid_to_str(self.dp.id),
             'ports': [port.to_dict() for port in self.ports]}
        return d

    def __str__(self):
        msg = 'Switch<dpid=%s, ' % self.dp.id
        for port in self.ports:
            msg += str(port) + ' '
        msg += '>'
        return msg
class Link(object):
    # Data class describing a directed src->dst link; carried by EventLinkXXX.
    def __init__(self, src, dst):
        super(Link, self).__init__()
        self.src = src
        self.dst = dst

    def to_dict(self):
        return {'src': self.src.to_dict(), 'dst': self.dst.to_dict()}

    # this type is used for key value of LinkState
    def __eq__(self, other):
        return (self.src, self.dst) == (other.src, other.dst)

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.src, self.dst))

    def __str__(self):
        return 'Link: %s to %s' % (self.src, self.dst)
class Host(object):
    # Data class describing a discovered end host; carried by EventHostXXX.
    def __init__(self, mac, port):
        super(Host, self).__init__()
        self.port = port
        self.mac = mac
        self.ipv4 = []
        self.ipv6 = []

    def to_dict(self):
        return {'mac': self.mac,
                'ipv4': self.ipv4,
                'ipv6': self.ipv6,
                'port': self.port.to_dict()}

    def __eq__(self, host):
        return self.mac == host.mac and self.port == host.port

    def __str__(self):
        parts = ['Host<mac=%s, port=%s,' % (self.mac, str(self.port))]
        parts.append(','.join(self.ipv4))
        parts.append(','.join(self.ipv6))
        parts.append('>')
        return ''.join(parts)
class HostState(dict):
    # mac address -> Host class
    def __init__(self):
        super(HostState, self).__init__()

    def add(self, host):
        """Track *host*, keyed by MAC; existing entries are kept."""
        self.setdefault(host.mac, host)

    def update_ip(self, host, ip_v4=None, ip_v6=None):
        """Append new IPv4/IPv6 addresses to the tracked host with
        host.mac; silently ignores unknown hosts and duplicates.

        FIX: replaced `!= None` comparisons with `is not None` and removed
        the confusing reuse of the `host` parameter as a lookup temporary.
        """
        tracked = self.get(host.mac)
        if not tracked:
            return
        if ip_v4 is not None and ip_v4 not in tracked.ipv4:
            tracked.ipv4.append(ip_v4)
        if ip_v6 is not None and ip_v6 not in tracked.ipv6:
            tracked.ipv6.append(ip_v6)

    def get_by_dpid(self, dpid):
        """Return all tracked hosts attached to switch *dpid*."""
        return [host for host in self.values() if host.port.dpid == dpid]
class PortState(dict):
    # dict: int port_no -> OFPPort port
    # OFPPort is defined in ryu.ofproto.ofproto_v1_X_parser
    def __init__(self):
        super(PortState, self).__init__()

    def add(self, port_no, port):
        self[port_no] = port

    def remove(self, port_no):
        # KeyError when port_no is unknown, same as `del`.
        self.pop(port_no)

    def modify(self, port_no, port):
        self[port_no] = port
class SwitchData(object):
    # Per-switch LLDP bookkeeping: one LLDP probe is sent per switch.
    def __init__(self, lldp_data):
        super(SwitchData, self).__init__()
        self.lldp_data = lldp_data
        self.timestamp = None
        self.sent = 0

    def lldp_sent(self):
        # Record when the probe went out and how many are unanswered.
        self.sent = self.sent + 1
        self.timestamp = time.time()

    def lldp_received(self):
        self.sent = 0

    def lldp_dropped(self):
        # Number of probes sent since the last reply.
        return self.sent

    def clear_timestamp(self):
        self.timestamp = None

    def __str__(self):
        return 'SwitchData<timestamp=%s, sent=%d>' % (
            self.timestamp, self.sent)
class PortData(object):
    # Per-port LLDP bookkeeping: probe payload, send count and liveness.
    def __init__(self, is_down, lldp_data):
        super(PortData, self).__init__()
        self.is_down = is_down
        self.lldp_data = lldp_data
        self.timestamp = None
        self.sent = 0

    def lldp_sent(self):
        # Record when the probe went out and how many are unanswered.
        self.sent = self.sent + 1
        self.timestamp = time.time()

    def lldp_received(self):
        self.sent = 0

    def lldp_dropped(self):
        # Number of probes sent since the last reply.
        return self.sent

    def clear_timestamp(self):
        self.timestamp = None

    def set_down(self, is_down):
        self.is_down = is_down

    def __str__(self):
        return 'PortData<live=%s, timestamp=%s, sent=%d>' % (
            not self.is_down, self.timestamp, self.sent)
class SwitchDataState(dict):
    # dict: Switch class -> SwitchData class
    # slimed down version of OrderedDict as python 2.6 doesn't support it.
    # Insertion order is kept in a circular doubly linked list of
    # [prev, next, key] cells anchored at a sentinel (the classic
    # OrderedDict recipe); self._map maps key -> its list cell.
    _PREV = 0
    _NEXT = 1
    _KEY = 2
    def __init__(self):
        super(SwitchDataState, self).__init__()
        self._root = root = []  # sentinel node
        root[:] = [root, root, None]  # [_PREV, _NEXT, _KEY]
        # doubly linked list
        self._map = {}
    def _remove_key(self, key):
        # Unlink key's cell from the ordering list.
        link_prev, link_next, key = self._map.pop(key)
        link_prev[self._NEXT] = link_next
        link_next[self._PREV] = link_prev
    def _append_key(self, key):
        # Insert a new cell for key at the tail (probed last).
        root = self._root
        last = root[self._PREV]
        last[self._NEXT] = root[self._PREV] = self._map[key] = [last, root,
                                                                key]
    def _prepend_key(self, key):
        # Insert a new cell for key at the head (probed next).
        root = self._root
        first = root[self._NEXT]
        first[self._PREV] = root[self._NEXT] = self._map[key] = [root, first,
                                                                 key]
    def _move_last_key(self, key):
        self._remove_key(key)
        self._append_key(key)
    def _move_front_key(self, key):
        self._remove_key(key)
        self._prepend_key(key)
    def add_switch(self, dp, lldp_data):
        # Start tracking a switch; no-op if it is already present.
        if dp not in self:
            self._prepend_key(dp)
            self[dp] = SwitchData(lldp_data)
    def lldp_sent(self, dp):
        # Mark a probe as sent and rotate dp to the back of the queue.
        switch_data = self[dp]
        switch_data.lldp_sent()
        self._move_last_key(dp)
        return switch_data
    def lldp_received(self, dp):
        self[dp].lldp_received()
    def move_front(self, dp):
        # Schedule dp to be probed next, resetting its send timestamp.
        switch_data = self.get(dp, None)
        if switch_data is not None:
            switch_data.clear_timestamp()
            self._move_front_key(dp)
    def get_switch(self, dp):
        return self[dp]
    def del_port(self, dp):
        # NOTE(review): despite the name, this removes a *switch* entry
        # (copied from PortDataState); kept as-is for caller compatibility.
        del self[dp]
        self._remove_key(dp)
    def __iter__(self):
        # Yield keys in queue order, front (next to probe) to back.
        root = self._root
        curr = root[self._NEXT]
        while curr is not root:
            yield curr[self._KEY]
            curr = curr[self._NEXT]
    def clear(self):
        # Break the cells' cyclic references to help GC, then reset.
        for node in self._map.values():
            del node[:]
        root = self._root
        root[:] = [root, root, None]
        self._map.clear()
        dict.clear(self)
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) pairs in od'
        for k in self:
            yield (k, self[k])
class PortDataState(dict):
    # dict: Port class -> PortData class
    # slimed down version of OrderedDict as python 2.6 doesn't support it.
    # Insertion order is kept in a circular doubly linked list of
    # [prev, next, key] cells anchored at a sentinel (the classic
    # OrderedDict recipe); self._map maps key -> its list cell.
    _PREV = 0
    _NEXT = 1
    _KEY = 2
    def __init__(self):
        super(PortDataState, self).__init__()
        self._root = root = []  # sentinel node
        root[:] = [root, root, None]  # [_PREV, _NEXT, _KEY]
        # doubly linked list
        self._map = {}
    def _remove_key(self, key):
        # Unlink key's cell from the ordering list.
        link_prev, link_next, key = self._map.pop(key)
        link_prev[self._NEXT] = link_next
        link_next[self._PREV] = link_prev
    def _append_key(self, key):
        # Insert a new cell for key at the tail (probed last).
        root = self._root
        last = root[self._PREV]
        last[self._NEXT] = root[self._PREV] = self._map[key] = [last, root,
                                                                key]
    def _prepend_key(self, key):
        # Insert a new cell for key at the head (probed next).
        root = self._root
        first = root[self._NEXT]
        first[self._PREV] = root[self._NEXT] = self._map[key] = [root, first,
                                                                 key]
    def _move_last_key(self, key):
        self._remove_key(key)
        self._append_key(key)
    def _move_front_key(self, key):
        self._remove_key(key)
        self._prepend_key(key)
    def add_port(self, port, lldp_data):
        # Start tracking a port, or refresh the liveness of a known one.
        if port not in self:
            self._prepend_key(port)
            self[port] = PortData(port.is_down(), lldp_data)
        else:
            self[port].is_down = port.is_down()
    def lldp_sent(self, port):
        # Mark a probe as sent and rotate port to the back of the queue.
        port_data = self[port]
        port_data.lldp_sent()
        self._move_last_key(port)
        return port_data
    def lldp_received(self, port):
        self[port].lldp_received()
    def move_front(self, port):
        # Schedule port to be probed next, resetting its send timestamp.
        port_data = self.get(port, None)
        if port_data is not None:
            port_data.clear_timestamp()
            self._move_front_key(port)
    def set_down(self, port):
        # Sync the stored liveness from the live Port; returns True when
        # the port is now down.
        is_down = port.is_down()
        port_data = self[port]
        port_data.set_down(is_down)
        port_data.clear_timestamp()
        if not is_down:
            self._move_front_key(port)
        return is_down
    def get_port(self, port):
        return self[port]
    def del_port(self, port):
        del self[port]
        self._remove_key(port)
    def __iter__(self):
        # Yield keys in queue order, front (next to probe) to back.
        root = self._root
        curr = root[self._NEXT]
        while curr is not root:
            yield curr[self._KEY]
            curr = curr[self._NEXT]
    def clear(self):
        # Break the cells' cyclic references to help GC, then reset.
        for node in self._map.values():
            del node[:]
        root = self._root
        root[:] = [root, root, None]
        self._map.clear()
        dict.clear(self)
    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]
    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) pairs in od'
        for k in self:
            yield (k, self[k])
class LinkState(dict):
    # dict: Link class -> timestamp
    # self._map additionally records, for each source Port, the destination
    # Port it currently links to (one peer per source), enabling the
    # reverse lookups used by get_peer()/port_deleted().
    def __init__(self):
        super(LinkState, self).__init__()
        self._map = {}
    def get_peer(self, src):
        return self._map.get(src, None)
    def update_link(self, src, dst):
        # Record/refresh the src->dst link with the current time.
        link = Link(src, dst)
        self[link] = time.time()
        self._map[src] = dst
        # return if the reverse link is also up or not
        rev_link = Link(dst, src)
        return rev_link in self
    def link_down(self, link):
        del self[link]
        del self._map[link.src]
    def rev_link_set_timestamp(self, rev_link, timestamp):
        # rev_link may or may not in LinkSet
        if rev_link in self:
            self[rev_link] = timestamp
    def port_deleted(self, src):
        # Drop both directions of the link rooted at src; raises KeyError
        # when src has no recorded peer. Returns (dst, rev_link_dst) so the
        # caller can emit delete events for whichever directions existed.
        dst = self.get_peer(src)
        if dst is None:
            raise KeyError()
        link = Link(src, dst)
        rev_link = Link(dst, src)
        del self[link]
        del self._map[src]
        # reverse link might not exist
        self.pop(rev_link, None)
        rev_link_dst = self._map.pop(dst, None)
        return dst, rev_link_dst
class LLDPPacket(object):
    # make a LLDP packet for link discovery.
    '''
    instead of sending LLDP to every port
    just send LLDP to every switch
    '''
    # The chassis-id TLV carries 'dpid:<hex dpid>'; the port-id TLV carries
    # the raw port number packed as a network-order uint32.
    CHASSIS_ID_PREFIX = 'dpid:'
    CHASSIS_ID_PREFIX_LEN = len(CHASSIS_ID_PREFIX)
    CHASSIS_ID_FMT = CHASSIS_ID_PREFIX + '%s'
    PORT_ID_STR = '!I'  # uint32_t
    PORT_ID_SIZE = 4
    class LLDPUnknownFormat(RyuException):
        # Raised when a received frame is not one of our LLDP probes.
        message = '%(msg)s'
    @staticmethod
    def lldp_packet(dpid, port_no, dl_addr, ttl):
        # Build a serialized Ethernet/LLDP frame whose chassis-id TLV
        # carries the datapath id and whose port-id TLV carries port_no.
        pkt = packet.Packet()
        dst = lldp.LLDP_MAC_NEAREST_BRIDGE
        src = dl_addr
        ethertype = ETH_TYPE_LLDP
        eth_pkt = ethernet.ethernet(dst, src, ethertype)
        pkt.add_protocol(eth_pkt)
        tlv_chassis_id = lldp.ChassisID(
            subtype=lldp.ChassisID.SUB_LOCALLY_ASSIGNED,
            chassis_id=(LLDPPacket.CHASSIS_ID_FMT %
                        dpid_to_str(dpid)).encode('ascii'))
        tlv_port_id = lldp.PortID(subtype=lldp.PortID.SUB_PORT_COMPONENT,
                                  port_id=struct.pack(
                                      LLDPPacket.PORT_ID_STR,
                                      port_no))
        tlv_ttl = lldp.TTL(ttl=ttl)
        tlv_end = lldp.End()
        tlvs = (tlv_chassis_id, tlv_port_id, tlv_ttl, tlv_end)
        lldp_pkt = lldp.lldp(tlvs)
        pkt.add_protocol(lldp_pkt)
        pkt.serialize()
        return pkt.data
    @staticmethod
    def lldp_parse(data):
        # Extract the source datapath id from one of our LLDP probes;
        # raises LLDPUnknownFormat for any other frame.
        pkt = packet.Packet(data)
        i = iter(pkt)
        eth_pkt = six.next(i)
        assert type(eth_pkt) == ethernet.ethernet
        lldp_pkt = six.next(i)
        if type(lldp_pkt) != lldp.lldp:
            raise LLDPPacket.LLDPUnknownFormat()
        tlv_chassis_id = lldp_pkt.tlvs[0]
        if tlv_chassis_id.subtype != lldp.ChassisID.SUB_LOCALLY_ASSIGNED:
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown chassis id subtype %d' % tlv_chassis_id.subtype)
        chassis_id = tlv_chassis_id.chassis_id
        # NOTE(review): chassis_id is bytes on Python 3 (encoded with
        # 'ascii' in lldp_packet) while CHASSIS_ID_PREFIX is str, so this
        # startswith would raise TypeError there — confirm Python 2 / six
        # normalization is in effect before relying on this path.
        if not chassis_id.startswith(LLDPPacket.CHASSIS_ID_PREFIX):
            raise LLDPPacket.LLDPUnknownFormat(
                msg='unknown chassis id format %s' % chassis_id)
        src_dpid = str_to_dpid(chassis_id[LLDPPacket.CHASSIS_ID_PREFIX_LEN:])
        return src_dpid
class Switches(app_manager.RyuApp):
    """Topology discovery app: tracks datapaths, ports, links and hosts,
    and probes links with one LLDP packet per switch."""
    # OpenFlow versions this app can speak.
    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION, ofproto_v1_2.OFP_VERSION,
                    ofproto_v1_3.OFP_VERSION, ofproto_v1_4.OFP_VERSION]
    # Topology events this app may emit to observers.
    _EVENTS = [event.EventSwitchEnter, event.EventSwitchLeave,
               event.EventPortAdd, event.EventPortDelete,
               event.EventPortModify,
               event.EventLinkAdd, event.EventLinkDelete,
               event.EventHostAdd]
    DEFAULT_TTL = 120  # unused. ignored.
    LLDP_PACKET_LEN = len(LLDPPacket.lldp_packet(0, 0, DONTCARE_STR, 0))
    # Timing knobs for the LLDP probe / link timeout loops (seconds).
    LLDP_SEND_GUARD = .05
    LLDP_SEND_PERIOD_PER_PORT = .9
    LLDP_SEND_PERIOD_PER_SWITCH = .9
    TIMEOUT_CHECK_PERIOD = 5.
    LINK_TIMEOUT = TIMEOUT_CHECK_PERIOD * 2
    # A link is declared dead after this many unanswered probes.
    LINK_LLDP_DROP = 5
    def __init__(self, *args, **kwargs):
        """Initialize topology state; spawn the LLDP and link-timeout
        worker greenlets when link discovery is enabled."""
        super(Switches, self).__init__(*args, **kwargs)
        self.name = 'switches'
        self.dps = {}                 # datapath_id => Datapath class
        self.port_state = {}          # datapath_id => ports
        self.ports = PortDataState()  # Port class -> PortData class
        self.switches = SwitchDataState()  # Switch class -> SwitchData class
        self.links = LinkState()      # Link class -> timestamp
        self.hosts = HostState()      # mac address -> Host class list
        self.is_active = True
        self.link_discovery = self.CONF.observe_links
        if self.link_discovery:
            self.install_flow = self.CONF.install_lldp_flow
            self.explicit_drop = self.CONF.explicit_drop
            # Events used to wake the worker loops early.
            self.lldp_event = hub.Event()
            self.link_event = hub.Event()
            self.threads.append(hub.spawn(self.lldp_loop))
            self.threads.append(hub.spawn(self.link_loop))
def close(self):
self.is_active = False
if self.link_discovery:
self.lldp_event.set()
self.link_event.set()
hub.joinall(self.threads)
def _register(self, dp):
assert dp.id is not None
self.dps[dp.id] = dp
if dp.id not in self.port_state:
self.port_state[dp.id] = PortState()
for port in dp.ports.values():
self.port_state[dp.id].add(port.port_no, port)
def _unregister(self, dp):
if dp.id in self.dps:
del self.dps[dp.id]
del self.port_state[dp.id]
def _get_switch(self, dpid):
if dpid in self.dps:
switch = Switch(self.dps[dpid])
for ofpport in self.port_state[dpid].values():
switch.add_port(ofpport)
return switch
def _get_port(self, dpid, port_no):
switch = self._get_switch(dpid)
if switch:
for p in switch.ports:
if p.port_no == port_no:
return p
def _port_added(self, port):
lldp_data = LLDPPacket.lldp_packet(
port.dpid, 0, port.hw_addr, self.DEFAULT_TTL)
self.ports.add_port(port, lldp_data)
# LOG.debug('_port_added dpid=%s, port_no=%s, live=%s',
# port.dpid, port.port_no, port.is_live())
# construct LLDP packet for switch
def _switch_added(self, dp):
lldp_data = LLDPPacket.lldp_packet(
dp.dp.id, 0, '00:00:00:00:00:00', self.DEFAULT_TTL)
self.switches.add_switch(dp, lldp_data)
# LOG.debug('_port_added dpid=%s, port_no=%s, live=%s',
# port.dpid, port.port_no, port.is_live())
    def _link_down(self, port):
        """Delete the link(s) rooted at *port* and emit EventLinkDelete
        for each direction that was known."""
        try:
            dst, rev_link_dst = self.links.port_deleted(port)
        except KeyError:
            # Port was not the source of any known link; nothing to do.
            # LOG.debug('key error. src=%s, dst=%s',
            #           port, self.links.get_peer(port))
            return
        link = Link(port, dst)
        self.send_event_to_observers(event.EventLinkDelete(link))
        if rev_link_dst:
            rev_link = Link(dst, rev_link_dst)
            self.send_event_to_observers(event.EventLinkDelete(rev_link))
        # Re-probe the peer promptly so its side of the topology converges.
        self.ports.move_front(dst)
def _is_edge_port(self, port):
for link in self.links:
if port == link.src or port == link.dst:
return False
return True
    @set_ev_cls(ofp_event.EventOFPStateChange,
                [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def state_change_handler(self, ev):
        """Handle datapath join/leave: (un)register it, emit switch
        enter/leave events, install the LLDP punt flow, and seed or tear
        down LLDP probing state."""
        dp = ev.datapath
        assert dp is not None
        LOG.debug(dp)
        if ev.state == MAIN_DISPATCHER:
            dp_multiple_conns = False
            if dp.id in self.dps:
                LOG.warning('multiple connections from %s', dpid_to_str(dp.id))
                dp_multiple_conns = True
            self._register(dp)
            switch = self._get_switch(dp.id)
            LOG.debug('register %s', switch)
            # Do not send event while dp has multiple connections.
            if not dp_multiple_conns:
                self.send_event_to_observers(event.EventSwitchEnter(switch))
                self._switch_added(switch)
                self.lldp_event.set()
            if not self.link_discovery:
                return
            if self.install_flow:
                ofproto = dp.ofproto
                ofproto_parser = dp.ofproto_parser
                # TODO:XXX need other versions
                if ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
                    rule = nx_match.ClsRule()
                    rule.set_dl_dst(addrconv.mac.text_to_bin(
                        lldp.LLDP_MAC_NEAREST_BRIDGE))
                    rule.set_dl_type(ETH_TYPE_LLDP)
                    actions = [ofproto_parser.OFPActionOutput(
                        ofproto.OFPP_CONTROLLER, self.LLDP_PACKET_LEN)]
                    dp.send_flow_mod(
                        rule=rule, cookie=0, command=ofproto.OFPFC_ADD,
                        idle_timeout=0, hard_timeout=0, actions=actions,
                        priority=0xFFFF)
                elif ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
                    match = ofproto_parser.OFPMatch(
                        eth_type=ETH_TYPE_LLDP,
                        eth_dst=lldp.LLDP_MAC_NEAREST_BRIDGE)
                    # OFPCML_NO_BUFFER is set so that the LLDP is not
                    # buffered on switch
                    parser = ofproto_parser
                    actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                                      ofproto.OFPCML_NO_BUFFER
                                                      )]
                    inst = [parser.OFPInstructionActions(
                        ofproto.OFPIT_APPLY_ACTIONS, actions)]
                    mod = parser.OFPFlowMod(datapath=dp, match=match,
                                            idle_timeout=0, hard_timeout=0,
                                            instructions=inst,
                                            priority=0xFFFF)
                    dp.send_msg(mod)
                else:
                    LOG.error('cannot install flow. unsupported version. %x',
                              dp.ofproto.OFP_VERSION)
            # Do not add ports while dp has multiple connections to controller.
            if not dp_multiple_conns:
                # NOTE(review): _switch_added() was already invoked above for
                # this datapath; add_switch() guards against duplicates, so
                # this second call is a harmless no-op.
                self._switch_added(switch)
                for port in switch.ports:
                    if not port.is_reserved():
                        self._port_added(port)
            self.lldp_event.set()
        elif ev.state == DEAD_DISPATCHER:
            # dp.id is None when datapath dies before handshake
            if dp.id is None:
                return
            switch = self._get_switch(dp.id)
            self._unregister(dp)
            LOG.debug('unregister %s', switch)
            self.send_event_to_observers(event.EventSwitchLeave(switch))
            if not self.link_discovery:
                return
            for port in switch.ports:
                if not port.is_reserved():
                    self.ports.del_port(port)
                    self._link_down(port)
            self.lldp_event.set()
    @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER)
    def port_status_handler(self, ev):
        """Handle port add/delete/modify: update the port table, emit the
        corresponding EventPortXXX, and keep LLDP/link state in sync."""
        msg = ev.msg
        reason = msg.reason
        dp = msg.datapath
        ofpport = msg.desc
        if reason == dp.ofproto.OFPPR_ADD:
            # LOG.debug('A port was added.' +
            #           '(datapath id = %s, port number = %s)',
            #           dp.id, ofpport.port_no)
            self.port_state[dp.id].add(ofpport.port_no, ofpport)
            self.send_event_to_observers(
                event.EventPortAdd(Port(dp.id, dp.ofproto, ofpport)))
            # NOTE(review): the per-port LLDP bootstrap below is disabled
            # (string statement, not executed) by the per-switch revision.
            '''
            if not self.link_discovery:
                return
            port = self._get_port(dp.id, ofpport.port_no)
            if port and not port.is_reserved():
                self._port_added(port)
                self.lldp_event.set()
            '''
        elif reason == dp.ofproto.OFPPR_DELETE:
            # LOG.debug('A port was deleted.' +
            #           '(datapath id = %s, port number = %s)',
            #           dp.id, ofpport.port_no)
            self.port_state[dp.id].remove(ofpport.port_no)
            self.send_event_to_observers(
                event.EventPortDelete(Port(dp.id, dp.ofproto, ofpport)))
            if not self.link_discovery:
                return
            port = self._get_port(dp.id, ofpport.port_no)
            if port and not port.is_reserved():
                self.ports.del_port(port)
                self._link_down(port)
                self.lldp_event.set()
        else:
            assert reason == dp.ofproto.OFPPR_MODIFY
            # LOG.debug('A port was modified.' +
            #           '(datapath id = %s, port number = %s)',
            #           dp.id, ofpport.port_no)
            self.port_state[dp.id].modify(ofpport.port_no, ofpport)
            self.send_event_to_observers(
                event.EventPortModify(Port(dp.id, dp.ofproto, ofpport)))
            if not self.link_discovery:
                return
            port = self._get_port(dp.id, ofpport.port_no)
            if port and not port.is_reserved():
                # A port that went down takes its link(s) down with it.
                if self.ports.set_down(port):
                    self._link_down(port)
                self.lldp_event.set()
@staticmethod
def _drop_packet(msg):
buffer_id = msg.buffer_id
if buffer_id == msg.datapath.ofproto.OFP_NO_BUFFER:
return
dp = msg.datapath
# TODO:XXX
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
dp.send_packet_out(buffer_id, msg.in_port, [])
elif dp.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
dp.send_packet_out(buffer_id, msg.match['in_port'], [])
else:
LOG.error('cannot drop_packet. unsupported version. %x',
dp.ofproto.OFP_VERSION)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def lldp_packet_in_handler(self, ev):
if not self.link_discovery:
return
msg = ev.msg
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
try:
src_mac = eth.src
src_dpid = LLDPPacket.lldp_parse(msg.data)
except LLDPPacket.LLDPUnknownFormat as e:
# This handler can receive all the packtes which can be
# not-LLDP packet. Ignore it silently
return
for port in self.port_state[src_dpid].values():
if port.hw_addr == src_mac:
src_port_no = port.port_no
dst_dpid = msg.datapath.id
if msg.datapath.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
dst_port_no = msg.in_port
elif msg.datapath.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
dst_port_no = msg.match['in_port']
else:
LOG.error('cannot accept LLDP. unsupported version. %x',
msg.datapath.ofproto.OFP_VERSION)
src = self._get_port(src_dpid, src_port_no)
if not src or src.dpid == dst_dpid:
return
try:
self.ports.lldp_received(src)
except KeyError:
# There are races between EventOFPPacketIn and
# EventDPPortAdd. So packet-in event can happend before
# port add event. In that case key error can happend.
# LOG.debug('lldp_received: KeyError %s', e)
pass
dst = self._get_port(dst_dpid, dst_port_no)
if not dst:
return
old_peer = self.links.get_peer(src)
# LOG.debug("Packet-In")
# LOG.debug(" src=%s", src)
# LOG.debug(" dst=%s", dst)
# LOG.debug(" old_peer=%s", old_peer)
if old_peer and old_peer != dst:
old_link = Link(src, old_peer)
self.send_event_to_observers(event.EventLinkDelete(old_link))
link = Link(src, dst)
if link not in self.links:
self.send_event_to_observers(event.EventLinkAdd(link))
# remove hosts from edge port
for host in self.hosts.values():
if not self._is_edge_port(host.port):
del self.hosts[host.mac]
if not self.links.update_link(src, dst):
# reverse link is not detected yet.
# So schedule the check early because it's very likely it's up
self.ports.move_front(dst)
self.lldp_event.set()
if self.explicit_drop:
self._drop_packet(msg)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def host_discovery_packet_in_handler(self, ev):
        """Learn hosts from packet-ins arriving on edge ports.

        Registers a new Host (emitting EventHostAdd) the first time a MAC
        is seen, then opportunistically records its IPv4/IPv6 address
        from ARP, IPv4 or IPv6 payloads.
        """
        msg = ev.msg
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        # ignore lldp packet
        if eth.ethertype == ETH_TYPE_LLDP:
            return
        datapath = msg.datapath
        dpid = datapath.id
        port_no = -1
        # in_port location differs by OpenFlow version (message field in
        # OF1.0, match field in OF1.2+).
        if msg.datapath.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            port_no = msg.in_port
        else:
            port_no = msg.match['in_port']
        port = self._get_port(dpid, port_no)
        # can't find this port(ex: logic port)
        if not port:
            return
        # ignore switch-to-switch port
        if not self._is_edge_port(port):
            return
        host_mac = eth.src
        host = Host(host_mac, port)
        if host_mac not in self.hosts:
            self.hosts.add(host)
            # NOTE: rebinds the handler argument `ev`; harmless since the
            # original event is no longer needed past this point.
            ev = event.EventHostAdd(host)
            self.send_event_to_observers(ev)
        # arp packet, update ip address
        if eth.ethertype == ether_types.ETH_TYPE_ARP:
            arp_pkt = pkt.get_protocols(arp.arp)[0]
            self.hosts.update_ip(host, ip_v4=arp_pkt.src_ip)
        # ipv4 packet, update ipv4 address
        elif eth.ethertype == ether_types.ETH_TYPE_IP:
            ipv4_pkt = pkt.get_protocols(ipv4.ipv4)[0]
            self.hosts.update_ip(host, ip_v4=ipv4_pkt.src)
        # ipv6 packet, update ipv6 address
        elif eth.ethertype == ether_types.ETH_TYPE_IPV6:
            # TODO: need to handle NDP
            ipv6_pkt = pkt.get_protocols(ipv6.ipv6)[0]
            self.hosts.update_ip(host, ip_v6=ipv6_pkt.src)
    def send_lldp_packet(self, sw):
        """Emit the pre-built LLDP frame for switch *sw* out its ports.

        Skips ports whose name is exactly "tap:".  Returns silently when
        the datapath is gone or the switch vanished from the table.
        """
        dp = sw.dp
        if dp is None:
            return
        try:
            switch_data = self.switches.lldp_sent(sw)
        except KeyError as e:
            # ports can be modified during our sleep in self.lldp_loop()
            # LOG.debug('send_lldp: KeyError %s', e)
            return
        # LOG.debug('lldp sent dpid=%s, port_no=%d', dp.id, port.port_no)
        # TODO:XXX
        actions = []
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
            for port_infor in self.port_state[dp.id].values():
                if port_infor.name != "tap:":
                    actions.append(dp.ofproto_parser.OFPActionSetDlSrc(port_infor.hw_addr))
                    # NOTE(review): this rebind discards the SetDlSrc just
                    # appended (and all earlier iterations), so only the
                    # last port's output action survives — looks like it
                    # was meant to be actions.append(...); confirm intent.
                    actions = [dp.ofproto_parser.OFPActionOutput(port_infor.port_no)]
            dp.send_packet_out(actions=actions, data=switch_data.lldp_data)
        elif dp.ofproto.OFP_VERSION >= ofproto_v1_2.OFP_VERSION:
            # OF1.2+: set the source MAC per port, then output, for every
            # non-"tap:" port, all in a single packet-out.
            for port_infor in self.port_state[dp.id].values():
                if port_infor.name != "tap:":
                    actions.append(dp.ofproto_parser.OFPActionSetField(eth_src=port_infor.hw_addr))
                    actions.append(dp.ofproto_parser.OFPActionOutput(port_infor.port_no))
            # actions = [dp.ofproto_parser.OFPActionOutput(self.port_state[dp].port_no)]
            out = dp.ofproto_parser.OFPPacketOut(
                datapath=dp, in_port=dp.ofproto.OFPP_CONTROLLER,
                buffer_id=dp.ofproto.OFP_NO_BUFFER, actions=actions,
                data=switch_data.lldp_data)
            dp.send_msg(out)
        else:
            LOG.error('cannot send lldp packet. unsupported version. %x',
                      dp.ofproto.OFP_VERSION)
    def lldp_loop(self):
        """Background loop that periodically sends LLDP probes.

        Switches whose timestamp is unset are probed immediately; the
        rest are probed once LLDP_SEND_PERIOD_PER_SWITCH has elapsed.
        The loop then sleeps until the next switch is due, or until
        lldp_event is set by another handler to force an early pass.
        """
        while self.is_active:
            self.lldp_event.clear()
            now = time.time()
            timeout = None
            switches_now = []    # never probed yet: send right away
            switches = []        # probe period elapsed: send this pass
            # self.switches iterates in probe order, so the first
            # not-yet-expired entry determines the sleep time.
            for (key, data) in self.switches.items():
                if data.timestamp is None:
                    switches_now.append(key)
                    continue
                expire = data.timestamp + self.LLDP_SEND_PERIOD_PER_SWITCH
                if expire <= now:
                    switches.append(key)
                    continue
                timeout = expire - now
                break
            for switch in switches_now:
                self.send_lldp_packet(switch)
            for switch in switches:
                self.send_lldp_packet(switch)
                hub.sleep(self.LLDP_SEND_GUARD)      # don't burst
            if timeout is not None and switches:
                timeout = 0     # We have already slept
            # LOG.debug('lldp sleep %s', timeout)
            self.lldp_event.wait(timeout=timeout)
    def link_loop(self):
        """Background loop that expires links that stopped carrying LLDP.

        A link is deleted when its timestamp is older than LINK_TIMEOUT
        and enough LLDP probes have gone unanswered; the reverse link is
        then aged aggressively since it is probably down as well.
        """
        while self.is_active:
            self.link_event.clear()
            now = time.time()
            deleted = []
            for (link, timestamp) in self.links.items():
                # LOG.debug('%s timestamp %d (now %d)', link, timestamp, now)
                if timestamp + self.LINK_TIMEOUT < now:
                    src = link.src
                    if src in self.ports:
                        port_data = self.ports.get_port(src)
                        # LOG.debug('port_data %s', port_data)
                        # Only expire once enough probes were dropped.
                        if port_data.lldp_dropped() > self.LINK_LLDP_DROP:
                            deleted.append(link)
            for link in deleted:
                self.links.link_down(link)
                # LOG.debug('delete %s', link)
                self.send_event_to_observers(event.EventLinkDelete(link))
                dst = link.dst
                rev_link = Link(dst, link.src)
                if rev_link not in deleted:
                    # It is very likely that the reverse link is also
                    # disconnected. Check it early by back-dating its
                    # timestamp and re-probing the peer port first.
                    expire = now - self.LINK_TIMEOUT
                    self.links.rev_link_set_timestamp(rev_link, expire)
                    if dst in self.ports:
                        self.ports.move_front(dst)
                        self.lldp_event.set()
            self.link_event.wait(timeout=self.TIMEOUT_CHECK_PERIOD)
@set_ev_cls(event.EventSwitchRequest)
def switch_request_handler(self, req):
# LOG.debug(req)
dpid = req.dpid
switches = []
if dpid is None:
# reply all list
for dp in self.dps.values():
switches.append(self._get_switch(dp.id))
elif dpid in self.dps:
switches.append(self._get_switch(dpid))
rep = event.EventSwitchReply(req.src, switches)
self.reply_to_request(req, rep)
@set_ev_cls(event.EventLinkRequest)
def link_request_handler(self, req):
# LOG.debug(req)
dpid = req.dpid
if dpid is None:
links = self.links
else:
links = [link for link in self.links if link.src.dpid == dpid]
rep = event.EventLinkReply(req.src, dpid, links)
self.reply_to_request(req, rep)
@set_ev_cls(event.EventHostRequest)
def host_request_handler(self, req):
dpid = req.dpid
hosts = []
if dpid is None:
for mac in self.hosts:
hosts.append(self.hosts[mac])
else:
hosts = self.hosts.get_by_dpid(dpid)
rep = event.EventHostReply(req.src, dpid, hosts)
self.reply_to_request(req, rep)
| |
# Copyright 2015 CloudByte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import units
import six
from six.moves import http_client
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder import interface
from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
@interface.volumedriver
class CloudByteISCSIDriver(san.SanISCSIDriver):
"""CloudByte ISCSI Driver.
Version history:
1.0.0 - Initial driver
1.1.0 - Add chap support and minor bug fixes
1.1.1 - Add wait logic for delete volumes
1.1.2 - Update ig to None before delete volume
1.2.0 - Add retype support
"""
VERSION = '1.2.0'
CI_WIKI_NAME = "CloudByte_CI"
# TODO(smcginnis) Either remove this if CI requirements are met, or
# remove this driver in the Pike release per normal deprecation
SUPPORTED = False
volume_stats = {}
    def __init__(self, *args, **kwargs):
        """Initialize the driver: register CloudByte config options,
        capture the CHAP setting and prime the volume stats cache."""
        super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
        # Register every CloudByte option group on this backend section.
        self.configuration.append_config_values(
            options.cloudbyte_add_qosgroup_opts)
        self.configuration.append_config_values(
            options.cloudbyte_create_volume_opts)
        self.configuration.append_config_values(
            options.cloudbyte_update_volume_opts)
        self.configuration.append_config_values(
            options.cloudbyte_connection_opts)
        self.cb_use_chap = self.configuration.use_chap_auth
        # Populate self.volume_stats eagerly at start-up.
        self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
"""Will prepare URL that connects to CloudByte."""
if params is None:
params = {}
params['command'] = cmd
params['response'] = 'json'
sanitized_params = {}
for key in params:
value = params[key]
if value is not None:
sanitized_params[key] = six.text_type(value)
sanitized_params = urllib.parse.urlencode(sanitized_params)
url = ('/client/api?%s' % sanitized_params)
LOG.debug("CloudByte URL to be executed: [%s].", url)
# Add the apikey
api = {}
api['apiKey'] = apikey
url = url + '&' + urllib.parse.urlencode(api)
return url
def _extract_http_error(self, error_data):
# Extract the error message from error_data
error_msg = ""
# error_data is a single key value dict
for key, value in error_data.items():
error_msg = value.get('errortext')
return error_msg
def _execute_and_get_response_details(self, host, url):
"""Will prepare response after executing an http request."""
res_details = {}
try:
# Prepare the connection
connection = http_client.HTTPSConnection(host)
# Make the connection
connection.request('GET', url)
# Extract the response as the connection was successful
response = connection.getresponse()
# Read the response
data = response.read()
# Transform the json string into a py object
data = json.loads(data)
# Extract http error msg if any
error_details = None
if response.status != 200:
error_details = self._extract_http_error(data)
# Prepare the return object
res_details['data'] = data
res_details['error'] = error_details
res_details['http_status'] = response.status
finally:
connection.close()
LOG.debug("CloudByte connection was closed successfully.")
return res_details
    def _api_request_for_cloudbyte(self, cmd, params, version=None):
        """Make http calls to CloudByte.

        Builds the URL for *cmd*/*params*, executes it against the SAN
        host and returns the parsed JSON body.  Raises
        VolumeBackendAPIException when the API key is missing, the HTTP
        call fails, or the backend returns a non-200 status.
        """
        LOG.debug("Executing CloudByte API for command [%s].", cmd)
        # NOTE(review): `version` is resolved here but never used
        # afterwards — dead assignment kept for interface compatibility.
        if version is None:
            version = CloudByteISCSIDriver.VERSION
        # Below is retrieved from /etc/cinder/cinder.conf
        apikey = self.configuration.cb_apikey
        if apikey is None:
            msg = (_("API key is missing for CloudByte driver."))
            raise exception.VolumeBackendAPIException(data=msg)
        host = self.configuration.san_ip
        # Construct the CloudByte URL with query params
        url = self._get_url(cmd, params, apikey)
        data = {}
        error_details = None
        http_status = None
        try:
            # Execute CloudByte API & frame the response
            res_obj = self._execute_and_get_response_details(host, url)
            data = res_obj['data']
            error_details = res_obj['error']
            http_status = res_obj['http_status']
        except http_client.HTTPException as ex:
            msg = (_("Error executing CloudByte API [%(cmd)s], "
                     "Error: %(err)s.") %
                   {'cmd': cmd, 'err': ex})
            raise exception.VolumeBackendAPIException(data=msg)
        # Check if it was an error response from CloudByte
        if http_status != 200:
            msg = (_("Failed to execute CloudByte API [%(cmd)s]."
                     " Http status: %(status)s,"
                     " Error: %(error)s.") %
                   {'cmd': cmd, 'status': http_status,
                    'error': error_details})
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.info(_LI("CloudByte API executed successfully for command [%s]."),
                 cmd)
        return data
def _request_tsm_details(self, account_id):
params = {"accountid": account_id}
# List all CloudByte tsm
data = self._api_request_for_cloudbyte("listTsm", params)
return data
def _add_qos_group_request(self, volume, tsmid, volume_name,
qos_group_params):
# Prepare the user input params
params = {
"name": "QoS_" + volume_name,
"tsmid": tsmid
}
# Get qos related params from configuration
params.update(self.configuration.cb_add_qosgroup)
# Override the default configuration by qos specs
if qos_group_params:
params.update(qos_group_params)
data = self._api_request_for_cloudbyte("addQosGroup", params)
return data
def _create_volume_request(self, volume, datasetid, qosgroupid,
tsmid, volume_name, file_system_params):
size = volume.get('size')
quotasize = six.text_type(size) + "G"
# Prepare the user input params
params = {
"datasetid": datasetid,
"name": volume_name,
"qosgroupid": qosgroupid,
"tsmid": tsmid,
"quotasize": quotasize
}
# Get the additional params from configuration
params.update(self.configuration.cb_create_volume)
# Override the default configuration by qos specs
if file_system_params:
params.update(file_system_params)
data = self._api_request_for_cloudbyte("createVolume", params)
return data
def _queryAsyncJobResult_request(self, jobid):
async_cmd = "queryAsyncJobResult"
params = {
"jobId": jobid,
}
data = self._api_request_for_cloudbyte(async_cmd, params)
return data
def _get_tsm_details(self, data, tsm_name, account_name):
# Filter required tsm's details
tsms = data['listTsmResponse'].get('listTsm')
if tsms is None:
msg = (_("TSM [%(tsm)s] was not found in CloudByte storage "
"for account [%(account)s].") %
{'tsm': tsm_name, 'account': account_name})
raise exception.VolumeBackendAPIException(data=msg)
tsmdetails = {}
for tsm in tsms:
if tsm['name'] == tsm_name:
tsmdetails['datasetid'] = tsm['datasetid']
tsmdetails['tsmid'] = tsm['id']
break
return tsmdetails
def _retry_volume_operation(self, operation, retries,
max_retries, jobid,
cb_volume):
"""CloudByte async calls via the FixedIntervalLoopingCall."""
# Query the CloudByte storage with this jobid
volume_response = self._queryAsyncJobResult_request(jobid)
count = retries['count']
result_res = None
if volume_response is not None:
result_res = volume_response.get('queryasyncjobresultresponse')
if result_res is None:
msg = (_(
"Null response received while querying "
"for [%(operation)s] based job [%(job)s] "
"at CloudByte storage.") %
{'operation': operation, 'job': jobid})
raise exception.VolumeBackendAPIException(data=msg)
status = result_res.get('jobstatus')
if status == 1:
LOG.info(_LI("CloudByte operation [%(operation)s] succeeded for "
"volume [%(cb_volume)s]."),
{'operation': operation, 'cb_volume': cb_volume})
raise loopingcall.LoopingCallDone()
elif status == 2:
job_result = result_res.get("jobresult")
err_msg = job_result.get("errortext")
err_code = job_result.get("errorcode")
msg = (_(
"Error in Operation [%(operation)s] "
"for volume [%(cb_volume)s] in CloudByte "
"storage: [%(cb_error)s], "
"error code: [%(error_code)s]."),
{'cb_error': err_msg,
'error_code': err_code,
'cb_volume': cb_volume,
'operation': operation})
raise exception.VolumeBackendAPIException(data=msg)
elif count == max_retries:
# All attempts exhausted
LOG.error(_LE("CloudByte operation [%(operation)s] failed"
" for volume [%(vol)s]. Exhausted all"
" [%(max)s] attempts."),
{'operation': operation,
'vol': cb_volume,
'max': max_retries})
raise loopingcall.LoopingCallDone(retvalue=False)
else:
count += 1
retries['count'] = count
LOG.debug("CloudByte operation [%(operation)s] for"
" volume [%(vol)s]: retry [%(retry)s] of [%(max)s].",
{'operation': operation,
'vol': cb_volume,
'retry': count,
'max': max_retries})
def _wait_for_volume_creation(self, volume_response, cb_volume_name):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('createvolumeresponse')
if vol_res is None:
msg = _("Null response received while creating volume [%s] "
"at CloudByte storage.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"create volume [%s] response.") % cb_volume_name
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_create_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_create_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Create Volume',
retries,
max_retries,
jobid,
cb_volume_name)
timer.start(interval=retry_interval).wait()
def _wait_for_volume_deletion(self, volume_response, cb_volume_id):
"""Given the job wait for it to complete."""
vol_res = volume_response.get('deleteFileSystemResponse')
if vol_res is None:
msg = _("Null response received while deleting volume [%s] "
"at CloudByte storage.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
jobid = vol_res.get('jobid')
if jobid is None:
msg = _("Job id not found in CloudByte's "
"delete volume [%s] response.") % cb_volume_id
raise exception.VolumeBackendAPIException(data=msg)
retry_interval = (
self.configuration.cb_confirm_volume_delete_retry_interval)
max_retries = (
self.configuration.cb_confirm_volume_delete_retries)
retries = {'count': 0}
timer = loopingcall.FixedIntervalLoopingCall(
self._retry_volume_operation,
'Delete Volume',
retries,
max_retries,
jobid,
cb_volume_id)
timer.start(interval=retry_interval).wait()
def _get_volume_id_from_response(self, cb_volumes, volume_name):
"""Search the volume in CloudByte storage."""
vol_res = cb_volumes.get('listFilesystemResponse')
if vol_res is None:
msg = _("Null response received from CloudByte's "
"list filesystem.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = vol_res.get('filesystem')
if volumes is None:
msg = _('No volumes found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['name'] == volume_name:
volume_id = vol['id']
break
if volume_id is None:
msg = _("Volume [%s] not found in CloudByte "
"storage.") % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return volume_id
def _get_qosgroupid_id_from_response(self, cb_volumes, volume_id):
volumes = cb_volumes['listFilesystemResponse']['filesystem']
qosgroup_id = None
for vol in volumes:
if vol['id'] == volume_id:
qosgroup_id = vol['groupid']
break
return qosgroup_id
def _build_provider_details_from_volume(self, volume, chap):
model_update = {}
model_update['provider_location'] = (
'%s %s %s' % (volume['ipaddress'] + ':3260', volume['iqnname'], 0)
)
# Will provide CHAP Authentication on forthcoming patches/release
model_update['provider_auth'] = None
if chap:
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
model_update['provider_id'] = volume['id']
LOG.debug("CloudByte volume iqn: [%(iqn)s] provider id: [%(proid)s].",
{'iqn': volume['iqnname'], 'proid': volume['id']})
return model_update
def _build_provider_details_from_response(self,
cb_volumes,
volume_name,
chap):
"""Get provider information."""
model_update = {}
volumes = cb_volumes['listFilesystemResponse']['filesystem']
for vol in volumes:
if vol['name'] == volume_name:
model_update = self._build_provider_details_from_volume(vol,
chap)
break
return model_update
def _get_initiator_group_id_from_response(self, data, filter):
"""Find iSCSI initiator group id."""
ig_list_res = data.get('listInitiatorsResponse')
if ig_list_res is None:
msg = _("Null response received from CloudByte's "
"list iscsi initiators.")
raise exception.VolumeBackendAPIException(data=msg)
ig_list = ig_list_res.get('initiator')
if ig_list is None:
msg = _('No iscsi initiators were found in CloudByte.')
raise exception.VolumeBackendAPIException(data=msg)
ig_id = None
for ig in ig_list:
if ig.get('initiatorgroup') == filter:
ig_id = ig['id']
break
return ig_id
def _get_iscsi_service_id_from_response(self, volume_id, data):
iscsi_service_res = data.get('listVolumeiSCSIServiceResponse')
if iscsi_service_res is None:
msg = _("Null response received from CloudByte's "
"list volume iscsi service.")
raise exception.VolumeBackendAPIException(data=msg)
iscsi_service_list = iscsi_service_res.get('iSCSIService')
if iscsi_service_list is None:
msg = _('No iscsi services found in CloudByte storage.')
raise exception.VolumeBackendAPIException(data=msg)
iscsi_id = None
for iscsi_service in iscsi_service_list:
if iscsi_service['volume_id'] == volume_id:
iscsi_id = iscsi_service['id']
break
if iscsi_id is None:
msg = _("No iscsi service found for CloudByte "
"volume [%s].") % volume_id
raise exception.VolumeBackendAPIException(data=msg)
else:
return iscsi_id
def _request_update_iscsi_service(self, iscsi_id, ig_id, ag_id):
params = {
"id": iscsi_id,
"igid": ig_id
}
if ag_id:
params['authgroupid'] = ag_id
params['authmethod'] = "CHAP"
self._api_request_for_cloudbyte(
'updateVolumeiSCSIService', params)
def _get_cb_snapshot_path(self, snapshot_name, volume_id):
"""Find CloudByte snapshot path."""
params = {"id": volume_id}
# List all snapshot from CloudByte
cb_snapshots_list = self._api_request_for_cloudbyte(
'listStorageSnapshots', params)
# Filter required snapshot from list
cb_snap_res = cb_snapshots_list.get('listDatasetSnapshotsResponse')
cb_snapshot = {}
if cb_snap_res is not None:
cb_snapshot = cb_snap_res.get('snapshot')
path = None
# Filter snapshot path
for snap in cb_snapshot:
if snap['name'] == snapshot_name:
path = snap['path']
break
return path
def _get_account_id_from_name(self, account_name):
params = {}
data = self._api_request_for_cloudbyte("listAccount", params)
accounts = data["listAccountResponse"]["account"]
account_id = None
for account in accounts:
if account.get("name") == account_name:
account_id = account.get("id")
break
if account_id is None:
msg = _("Failed to get CloudByte account details "
"for account [%s].") % account_name
raise exception.VolumeBackendAPIException(data=msg)
return account_id
def _search_volume_id(self, cb_volumes, cb_volume_id):
"""Search the volume in CloudByte."""
volumes_res = cb_volumes.get('listFilesystemResponse')
if volumes_res is None:
msg = _("No response was received from CloudByte's "
"list filesystem api call.")
raise exception.VolumeBackendAPIException(data=msg)
volumes = volumes_res.get('filesystem')
if volumes is None:
msg = _("No volume was found at CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
volume_id = None
for vol in volumes:
if vol['id'] == cb_volume_id:
volume_id = vol['id']
break
return volume_id
def _get_storage_info(self, tsmname):
"""Get CloudByte TSM that is associated with OpenStack backend."""
# List all TSMs from CloudByte storage
tsm_list = self._api_request_for_cloudbyte('listTsm', params={})
tsm_details_res = tsm_list.get('listTsmResponse')
if tsm_details_res is None:
msg = _("No response was received from CloudByte storage "
"list tsm API call.")
raise exception.VolumeBackendAPIException(data=msg)
tsm_details = tsm_details_res.get('listTsm')
data = {}
flag = 0
# Filter required TSM and get storage info
for tsms in tsm_details:
if tsms['name'] == tsmname:
flag = 1
data['total_capacity_gb'] = (
float(tsms['numericquota']) / units.Ki)
data['free_capacity_gb'] = (
float(tsms['availablequota']) / units.Ki)
break
# TSM not found in CloudByte storage
if flag == 0:
LOG.error(_LE("TSM [%s] not found in CloudByte storage."), tsmname)
data['total_capacity_gb'] = 0.0
data['free_capacity_gb'] = 0.0
return data
    def _get_auth_group_id_from_response(self, data):
        """Find iSCSI auth group id.

        Looks up the auth group whose name matches the configured
        cb_auth_group.  Raises VolumeBackendAPIException on a malformed
        or empty reply, and when no group matches.
        """
        chap_group = self.configuration.cb_auth_group
        ag_list_res = data.get('listiSCSIAuthGroupResponse')
        if ag_list_res is None:
            msg = _("Null response received from CloudByte's "
                    "list iscsi auth groups.")
            raise exception.VolumeBackendAPIException(data=msg)
        ag_list = ag_list_res.get('authgroup')
        if ag_list is None:
            msg = _('No iscsi auth groups were found in CloudByte.')
            raise exception.VolumeBackendAPIException(data=msg)
        ag_id = None
        # for/else: the else clause runs only when the loop finishes
        # WITHOUT hitting `break`, i.e. when no group matched.
        for ag in ag_list:
            if ag.get('name') == chap_group:
                ag_id = ag['id']
                break
        else:
            msg = _("Auth group [%s] details not found in "
                    "CloudByte storage.") % chap_group
            raise exception.VolumeBackendAPIException(data=msg)
        return ag_id
def _get_auth_group_info(self, account_id, ag_id):
"""Fetch the auth group details."""
params = {"accountid": account_id, "authgroupid": ag_id}
auth_users = self._api_request_for_cloudbyte(
'listiSCSIAuthUser', params)
auth_user_details_res = auth_users.get('listiSCSIAuthUsersResponse')
if auth_user_details_res is None:
msg = _("No response was received from CloudByte storage "
"list iSCSI auth user API call.")
raise exception.VolumeBackendAPIException(data=msg)
auth_user_details = auth_user_details_res.get('authuser')
if auth_user_details is None:
msg = _("Auth user details not found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
chapuser = auth_user_details[0].get('chapusername')
chappassword = auth_user_details[0].get('chappassword')
if chapuser is None or chappassword is None:
msg = _("Invalid chap user details found in CloudByte storage.")
raise exception.VolumeBackendAPIException(data=msg)
data = {'username': chapuser, 'password': chappassword, 'ag_id': ag_id}
return data
def _get_chap_info(self, account_id):
"""Fetch the chap details."""
params = {"accountid": account_id}
iscsi_auth_data = self._api_request_for_cloudbyte(
'listiSCSIAuthGroup', params)
ag_id = self._get_auth_group_id_from_response(
iscsi_auth_data)
return self._get_auth_group_info(account_id, ag_id)
def _export(self):
model_update = {'provider_auth': None}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
chap = self._get_chap_info(account_id)
model_update['provider_auth'] = ('CHAP %(username)s %(password)s'
% chap)
return model_update
def _update_initiator_group(self, volume_id, ig_name):
# Get account id of this account
account_name = self.configuration.cb_account_name
account_id = self._get_account_id_from_name(account_name)
# Fetch the initiator group ID
params = {"accountid": account_id}
iscsi_initiator_data = self._api_request_for_cloudbyte(
'listiSCSIInitiator', params)
# Filter the list of initiator groups with the name
ig_id = self._get_initiator_group_id_from_response(
iscsi_initiator_data, ig_name)
params = {"storageid": volume_id}
iscsi_service_data = self._api_request_for_cloudbyte(
'listVolumeiSCSIService', params)
iscsi_id = self._get_iscsi_service_id_from_response(
volume_id, iscsi_service_data)
# Update the iscsi service with above fetched iscsi_id
self._request_update_iscsi_service(iscsi_id, ig_id, None)
LOG.debug("CloudByte initiator group updated successfully for volume "
"[%(vol)s] with ig [%(ig)s].",
{'vol': volume_id,
'ig': ig_name})
    def _get_qos_by_volume_type(self, ctxt, type_id):
        """Get the properties which can be QoS or file system related.

        Splits the volume type's extra specs (merged with its QoS specs,
        which take precedence) into two dicts: QoS-group parameters and
        file-system parameters, as classified by the driver config.
        """
        update_qos_group_params = {}
        update_file_system_params = {}
        volume_type = volume_types.get_volume_type(ctxt, type_id)
        qos_specs_id = volume_type.get('qos_specs_id')
        extra_specs = volume_type.get('extra_specs')
        if qos_specs_id is not None:
            specs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
            # Override extra specs with specs
            # Hence specs will prefer QoS than extra specs
            # NOTE(review): this mutates the dict returned by
            # volume_type.get('extra_specs') in place — presumably a
            # throwaway copy; confirm against volume_types internals.
            extra_specs.update(specs)
        for key, value in extra_specs.items():
            # Scoped keys like "qos:iops" are reduced to their bare name.
            if ':' in key:
                fields = key.split(':')
                key = fields[1]
            if key in self.configuration.cb_update_qos_group:
                update_qos_group_params[key] = value
            elif key in self.configuration.cb_update_file_system:
                update_file_system_params[key] = value
        return update_qos_group_params, update_file_system_params
    def create_volume(self, volume):
        """Create a volume on CloudByte storage for an OpenStack volume.

        Workflow (strictly ordered): resolve account and TSM, create a
        QoS group, create the volume (async, waited on), then bind the
        volume's iSCSI service to an initiator group (and CHAP auth
        group when enabled).  Returns the provider model_update dict.
        """
        qos_group_params = {}
        file_system_params = {}
        tsm_name = self.configuration.cb_tsm_name
        account_name = self.configuration.cb_account_name
        # Get account id of this account
        account_id = self._get_account_id_from_name(account_name)
        # Set backend storage volume name using OpenStack volume id
        cb_volume_name = volume['id'].replace("-", "")
        ctxt = context.get_admin_context()
        type_id = volume['volume_type_id']
        # Volume-type QoS / filesystem overrides, if a type is set.
        if type_id is not None:
            qos_group_params, file_system_params = (
                self._get_qos_by_volume_type(ctxt, type_id))
        LOG.debug("Will create a volume [%(cb_vol)s] in TSM [%(tsm)s] "
                  "at CloudByte storage w.r.t "
                  "OpenStack volume [%(stack_vol)s].",
                  {'cb_vol': cb_volume_name,
                   'stack_vol': volume.get('id'),
                   'tsm': tsm_name})
        tsm_data = self._request_tsm_details(account_id)
        tsm_details = self._get_tsm_details(tsm_data, tsm_name, account_name)
        # Send request to create a qos group before creating a volume
        LOG.debug("Creating qos group for CloudByte volume [%s].",
                  cb_volume_name)
        qos_data = self._add_qos_group_request(
            volume, tsm_details.get('tsmid'), cb_volume_name, qos_group_params)
        # Extract the qos group id from response
        qosgroupid = qos_data['addqosgroupresponse']['qosgroup']['id']
        LOG.debug("Successfully created qos group for CloudByte volume [%s].",
                  cb_volume_name)
        # Send a create volume request to CloudByte API
        vol_data = self._create_volume_request(
            volume, tsm_details.get('datasetid'), qosgroupid,
            tsm_details.get('tsmid'), cb_volume_name, file_system_params)
        # Since create volume is an async call;
        # need to confirm the creation before proceeding further
        self._wait_for_volume_creation(vol_data, cb_volume_name)
        # Fetch iscsi id
        cb_volumes = self._api_request_for_cloudbyte(
            'listFileSystem', params={})
        volume_id = self._get_volume_id_from_response(cb_volumes,
                                                      cb_volume_name)
        params = {"storageid": volume_id}
        iscsi_service_data = self._api_request_for_cloudbyte(
            'listVolumeiSCSIService', params)
        iscsi_id = self._get_iscsi_service_id_from_response(
            volume_id, iscsi_service_data)
        # Fetch the initiator group ID
        params = {"accountid": account_id}
        iscsi_initiator_data = self._api_request_for_cloudbyte(
            'listiSCSIInitiator', params)
        # New volumes are exposed to the 'ALL' initiator group.
        ig_id = self._get_initiator_group_id_from_response(
            iscsi_initiator_data, 'ALL')
        LOG.debug("Updating iscsi service for CloudByte volume [%s].",
                  cb_volume_name)
        ag_id = None
        chap_info = {}
        if self.cb_use_chap is True:
            chap_info = self._get_chap_info(account_id)
            ag_id = chap_info['ag_id']
        # Update the iscsi service with above fetched iscsi_id & ig_id
        self._request_update_iscsi_service(iscsi_id, ig_id, ag_id)
        LOG.debug("CloudByte volume [%(vol)s] updated with "
                  "iscsi id [%(iscsi)s] and initiator group [%(ig)s] and "
                  "authentication group [%(ag)s].",
                  {'vol': cb_volume_name, 'iscsi': iscsi_id,
                   'ig': ig_id, 'ag': ag_id})
        # Provide the model after successful completion of above steps
        provider = self._build_provider_details_from_response(
            cb_volumes, cb_volume_name, chap_info)
        LOG.info(_LI("Successfully created a CloudByte volume [%(cb_vol)s] "
                     "w.r.t OpenStack volume [%(stack_vol)s]."),
                 {'cb_vol': cb_volume_name, 'stack_vol': volume.get('id')})
        return provider
def delete_volume(self, volume):
params = {}
# OpenStack source volume id
source_volume_id = volume['id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
LOG.debug("Will delete CloudByte volume [%(cb_vol)s] "
"w.r.t OpenStack volume [%(stack_vol)s].",
{'cb_vol': cb_volume_id, 'stack_vol': source_volume_id})
# Delete volume at CloudByte
if cb_volume_id is not None:
cb_volumes = self._api_request_for_cloudbyte(
'listFileSystem', params)
# Search cb_volume_id in CloudByte volumes
# incase it has already been deleted from CloudByte
cb_volume_id = self._search_volume_id(cb_volumes, cb_volume_id)
# Delete volume at CloudByte
if cb_volume_id is not None:
# Need to set the initiator group to None before deleting
self._update_initiator_group(cb_volume_id, 'None')
params = {"id": cb_volume_id}
del_res = self._api_request_for_cloudbyte('deleteFileSystem',
params)
self._wait_for_volume_deletion(del_res, cb_volume_id)
LOG.info(
_LI("Successfully deleted volume [%(cb_vol)s] "
"at CloudByte corresponding to "
"OpenStack volume [%(stack_vol)s]."),
{'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte does not have a volume corresponding "
"to OpenStack volume [%s]."), source_volume_id)
else:
LOG.error(_LE("CloudByte volume information not available for"
" OpenStack volume [%s]."), source_volume_id)
def create_snapshot(self, snapshot):
"""Creates a snapshot at CloudByte."""
# OpenStack volume
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
if cb_volume_id is not None:
# Set backend storage snapshot name using OpenStack snapshot id
snapshot_name = "snap_" + snapshot['id'].replace("-", "")
params = {
"name": snapshot_name,
"id": cb_volume_id
}
LOG.debug(
"Will create CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s].",
{'cb_snap': snapshot_name,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
self._api_request_for_cloudbyte('createStorageSnapshot', params)
# Get the snapshot path from CloudByte
path = self._get_cb_snapshot_path(snapshot_name, cb_volume_id)
LOG.info(
_LI("Created CloudByte snapshot [%(cb_snap)s] "
"w.r.t CloudByte volume [%(cb_vol)s] "
"and OpenStack volume [%(stack_vol)s]."),
{'cb_snap': path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
model_update = {}
# Store snapshot path as snapshot provider_id
model_update['provider_id'] = path
else:
msg = _("Failed to create snapshot. CloudByte volume information "
"not found for OpenStack volume [%s].") % source_volume_id
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, cloned_volume, src_volume):
"""Create a clone of an existing volume.
First it will create a snapshot of the source/parent volume,
then it creates a clone of this newly created snapshot.
"""
# Extract necessary information from input params
parent_volume_id = src_volume.get('id')
# Generating id for snapshot
# as this is not user entered in this particular usecase
snapshot_id = six.text_type(uuid.uuid1())
# Prepare the params for create_snapshot
# as well as create_volume_from_snapshot method
snapshot_params = {
'id': snapshot_id,
'volume_id': parent_volume_id,
'volume': src_volume,
}
# Create a snapshot
snapshot = self.create_snapshot(snapshot_params)
snapshot_params['provider_id'] = snapshot.get('provider_id')
# Create a clone of above snapshot
return self.create_volume_from_snapshot(cloned_volume, snapshot_params)
def create_volume_from_snapshot(self, cloned_volume, snapshot):
"""Create a clone from an existing snapshot."""
# Getting necessary data from input params
parent_volume_id = snapshot['volume_id']
cloned_volume_name = cloned_volume['id'].replace("-", "")
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
params = {
"id": cb_volume_id,
"clonename": cloned_volume_name,
"path": cb_snapshot_path
}
LOG.debug(
"Will create CloudByte clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s].",
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
# Create clone of the snapshot
clone_dataset_snapshot_res = (
self._api_request_for_cloudbyte('cloneDatasetSnapshot', params))
cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')
cb_vol = {}
if cb_snap is not None:
cb_vol = cb_snap.get('filesystem')
else:
msg = ("Error: Clone creation failed for "
"OpenStack volume [%(vol)s] with CloudByte "
"snapshot path [%(path)s]" %
{'vol': parent_volume_id, 'path': cb_snapshot_path})
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(
_LI("Created a clone [%(cb_clone)s] "
"at CloudByte snapshot path [%(cb_snap)s] "
"w.r.t parent OpenStack volume [%(stack_vol)s]."),
{'cb_clone': cloned_volume_name,
'cb_snap': cb_snapshot_path,
'stack_vol': parent_volume_id})
chap_info = {}
if self.cb_use_chap is True:
account_name = self.configuration.cb_account_name
# Get account id of this account
account_id = self._get_account_id_from_name(account_name)
chap_info = self._get_chap_info(account_id)
model_update = self._build_provider_details_from_volume(cb_vol,
chap_info)
return model_update
def delete_snapshot(self, snapshot):
"""Delete a snapshot at CloudByte."""
# Find volume id
source_volume_id = snapshot['volume_id']
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = snapshot.get('volume').get('provider_id')
# CloudByte snapshot path equals OpenStack snapshot's provider_id
cb_snapshot_path = snapshot['provider_id']
# If cb_snapshot_path is 'None'
# then no need to execute CloudByte API
if cb_snapshot_path is not None:
params = {
"id": cb_volume_id,
"path": cb_snapshot_path
}
LOG.debug("Will delete CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s].",
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
# Execute CloudByte API
self._api_request_for_cloudbyte('deleteSnapshot', params)
LOG.info(
_LI("Deleted CloudByte snapshot [%(snap)s] w.r.t "
"parent CloudByte volume [%(cb_vol)s] "
"and parent OpenStack volume [%(stack_vol)s]."),
{'snap': cb_snapshot_path,
'cb_vol': cb_volume_id,
'stack_vol': source_volume_id})
else:
LOG.error(_LE("CloudByte snapshot information is not available"
" for OpenStack volume [%s]."), source_volume_id)
def extend_volume(self, volume, new_size):
# CloudByte volume id equals OpenStack volume's provider_id
cb_volume_id = volume.get('provider_id')
params = {
"id": cb_volume_id,
"quotasize": six.text_type(new_size) + 'G'
}
# Request the CloudByte api to update the volume
self._api_request_for_cloudbyte('updateFileSystem', params)
    def create_export(self, context, volume, connector):
        """Setup the iscsi export info.

        Delegates to the driver-level ``_export`` helper; ``context``,
        ``volume`` and ``connector`` are not used here.
        """
        return self._export()
    def ensure_export(self, context, volume):
        """Verify the iscsi export info.

        Delegates to the driver-level ``_export`` helper; ``context`` and
        ``volume`` are not used here.
        """
        return self._export()
def get_volume_stats(self, refresh=False):
"""Get volume statistics.
If 'refresh' is True, update/refresh the statistics first.
"""
if refresh:
# Get the TSM name from configuration
tsm_name = self.configuration.cb_tsm_name
# Get the storage details of this TSM
data = self._get_storage_info(tsm_name)
data["volume_backend_name"] = (
self.configuration.safe_get('volume_backend_name') or
'CloudByte')
data["vendor_name"] = 'CloudByte'
data['reserved_percentage'] = 0
data["driver_version"] = CloudByteISCSIDriver.VERSION
data["storage_protocol"] = 'iSCSI'
LOG.debug("CloudByte driver stats: [%s].", data)
# Set this to the instance variable
self.volume_stats = data
return self.volume_stats
def retype(self, ctxt, volume, new_type, diff, host):
"""Retypes a volume, QoS and file system update is only done."""
cb_volume_id = volume.get('provider_id')
if cb_volume_id is None:
message = _("Provider information w.r.t CloudByte storage "
"was not found for OpenStack "
"volume [%s].") % volume['id']
raise exception.VolumeBackendAPIException(message)
update_qos_group_params, update_file_system_params = (
self._get_qos_by_volume_type(ctxt, new_type['id']))
if update_qos_group_params:
list_file_sys_params = {'id': cb_volume_id}
response = self._api_request_for_cloudbyte(
'listFileSystem', list_file_sys_params)
response = response['listFilesystemResponse']
cb_volume_list = response['filesystem']
cb_volume = cb_volume_list[0]
if not cb_volume:
msg = (_("Volume [%(cb_vol)s] was not found at "
"CloudByte storage corresponding to OpenStack "
"volume [%(ops_vol)s].") %
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
raise exception.VolumeBackendAPIException(data=msg)
update_qos_group_params['id'] = cb_volume.get('groupid')
self._api_request_for_cloudbyte(
'updateQosGroup', update_qos_group_params)
if update_file_system_params:
update_file_system_params['id'] = cb_volume_id
self._api_request_for_cloudbyte(
'updateFileSystem', update_file_system_params)
LOG.info(_LI("Successfully updated CloudByte volume [%(cb_vol)s] "
"corresponding to OpenStack volume [%(ops_vol)s]."),
{'cb_vol': cb_volume_id, 'ops_vol': volume['id']})
return True
| |
"""
Various tests related to validation for automated signing.
"""
from nose.tools import eq_
from .helper import RegexTestCase
from .js_helper import TestCase
from validator.testcases import regex
from validator.testcases.regex import maybe_tuple
class TestSearchService(TestCase, RegexTestCase):
    """Tests that warnings related to the search service trigger warnings."""
    def test_changes(self):
        """Tests that setting changes trigger warnings."""
        def test(obj, prop, stuff, warnings):
            self.setUp()
            self.run_script("""%s.%s%s;""" % (obj, prop, stuff))
            self.assert_failed(with_warnings=warnings)
        objs = ("Cc[''].getService(Ci.nsIBrowserSearchService)",
                'Services.search')
        for obj in objs:
            # NOTE: the trailing comma makes `warnings` a 1-tuple holding a
            # single expected-warning dict.
            warnings = {'signing_severity': 'high',
                        'id': ('testcases_javascript_actions',
                               'search_service',
                               'changes')},
            for prop in 'currentEngine', 'defaultEngine':
                yield test, obj, prop, ' = foo', warnings
            # Engine-manipulation methods carry a lower severity than
            # direct engine assignment.
            warnings[0]['signing_severity'] = 'medium'
            for meth in ('addEngine', 'addEngineWithDetails',
                         'removeEngine', 'moveEngine'):
                yield test, obj, meth, '(foo, bar, baz)', warnings
    def test_registry_write(self):
        """Tests that Windows registry writes trigger warnings."""
        # Two warnings are expected: one for the registry write itself and
        # one for the dangerous nsIWindowsRegKey global.
        warnings = ({'id': ('testcases_javascript_actions',
                            'windows_registry', 'write'),
                     'signing_severity': 'medium'},
                    {'id': ('js', 'traverser', 'dangerous_global'),
                     'signing_severity': 'low'})
        def test(method):
            self.setUp()
            self.run_script("""
                Cc[""].createInstance(Ci.nsIWindowsRegKey).%s(foo, bar);
            """ % method)
            self.assert_failed(with_warnings=warnings)
        for method in ('create', 'createChild', 'writeBinaryValue',
                       'writeInt64Value', 'writeIntValue', 'writeStringValue'):
            yield test, method
    def test_evalInSandbox(self):
        """Tests that evalInSandbox causes signing warnings."""
        self.run_script("""
            Cu.evalInSandbox("foobar()", sandbox);
        """)
        self.assert_failed(with_warnings=[{'signing_severity': 'low'}])
    def test_pref_branches(self):
        """
        Tests that writes to potentially dangerous preferences are flagged.
        """
        def test(pref, severity):
            warnings = [
                {'message': 'Attempt to set a dangerous preference',
                 'signing_severity': severity}]
            self.setUp()
            self.run_script("""
                Services.prefs.setCharPref('%s', '42');
            """ % pref)
            self.assert_failed(with_warnings=warnings)
        # (preference name or wildcard pattern, expected severity)
        PREFS = (('browser.newtab.url', 'high'),
                 ('browser.newtabpage.enabled', 'high'),
                 ('browser.search.defaultenginename', 'high'),
                 ('browser.startup.homepage', 'high'),
                 ('keyword.URL', 'high'),
                 ('keyword.enabled', 'high'),
                 ('app.update.*', 'high'),
                 ('browser.addon-watch.*', 'high'),
                 ('datareporting.', 'high'),
                 ('extensions.blocklist.*', 'high'),
                 ('extensions.getAddons.*', 'high'),
                 ('extensions.update.*', 'high'),
                 ('security.*', 'high'),
                 ('network.proxy.*', 'low'),
                 ('network.http.*', 'low'),
                 ('network.websocket.*', 'low'))
        for pref, severity in PREFS:
            yield test, pref, severity
    def test_pref_composed_branches(self):
        """
        Tests that preference warnings still happen when branches are composed
        via `getBranch`.
        """
        warnings = [
            {'message': 'Attempt to set a dangerous preference',
             'signing_severity': 'high'}]
        self.run_script("""
            Services.prefs.getBranch('browser.star')
                    .setCharPref('tup.homepage', 'http://evil.com');
        """)
        self.assert_failed(with_warnings=warnings)
        self.setUp()
        # The branch prefix may also reach the setter through an alias.
        self.run_script("""
            let set = Services.prefs.getBranch('browser.star').setCharPref;
            set('tup.homepage', 'http://evil.com');
        """)
        self.assert_failed(with_warnings=warnings)
    def test_pref_literals_reported_once(self):
        """Tests that warnings for preference literals are reported only when
        necessary."""
        CALL_WARNING = {'id': ('testcases_javascript_actions',
                               '_call_expression', 'called_set_preference')}
        LITERAL_WARNING = {'id': regex.PREFERENCE_ERROR_ID}
        # Expected signing summary; the 'high' count is bumped below when
        # a second high-severity warning is expected.
        SUMMARY = {'trivial': 0,
                   'low': 0,
                   'medium': 0,
                   'high': 1}
        # Literal without pref set call.
        self.run_script("""
            frob('browser.startup.homepage');
        """)
        self.assert_failed(with_warnings=[LITERAL_WARNING])
        eq_(len(self.err.warnings), 1)
        eq_(self.err.signing_summary, SUMMARY)
        # Literal with pref set call.
        for method in ('Services.prefs.setCharPref', 'Preferences.set'):
            self.setUp()
            self.run_script("""
                %s('browser.startup.homepage', '');
            """ % method)
            self.assert_failed(with_warnings=[CALL_WARNING])
            eq_(len(self.err.warnings), 1)
            eq_(self.err.signing_summary, SUMMARY)
        # Literal with pref set call on different line.
        self.setUp()
        self.run_script("""
            let bsh = 'browser.startup.homepage';
            Services.prefs.setCharPref(bsh, '');
        """)
        SUMMARY['high'] += 1
        self.assert_failed(with_warnings=[CALL_WARNING, LITERAL_WARNING])
        eq_(len(self.err.warnings), 2)
        eq_(self.err.signing_summary, SUMMARY)
    def test_get_preference_calls_ignored(self):
        """Tests that string literals provably used only to read, but not
        write, preferences do not cause warnings."""
        LITERAL_WARNING = {'id': regex.PREFERENCE_ERROR_ID}
        # Literal without pref get or set call.
        self.run_script("""
            frob('browser.startup.homepage');
        """)
        self.assert_failed(with_warnings=[LITERAL_WARNING])
        eq_(len(self.err.warnings), 1)
        # Literal passed directly pref get call.
        for method in ('Services.prefs.getCharPref',
                       'Preferences.get'):
            self.setUp()
            self.run_script("""
                let thing = %s('browser.startup.homepage');
            """ % method)
            eq_(len(self.err.warnings), 0)
        # Literal passed indirectly pref get call.
        self.setUp()
        self.run_script("""
            let bsh = 'browser.sta' + 'rtup.homepage';
            let thing = Services.prefs.getCharPref(bsh);
        """)
        self.assert_failed(with_warnings=[LITERAL_WARNING])
        eq_(len(self.err.warnings), 1)
    def test_pref_help_added_to_bare_strings(self):
        """Test that a help messages about passing literals directly to
        APIs is added only to bare strings."""
        self.run_script("""
            'browser.startup.homepage';
            Preferences.set('browser.startup.homepage');
        """)
        warnings = self.err.warnings
        assert warnings[0]['id'] == regex.PREFERENCE_ERROR_ID
        assert warnings[1]['id'] == ('testcases_javascript_actions',
                                     '_call_expression',
                                     'called_set_preference')
        # Check that descriptions and help are the same, except for
        # an added message in the bare string.
        for key in 'description', 'signing_help':
            val1 = maybe_tuple(warnings[0][key])
            val2 = maybe_tuple(warnings[1][key])
            eq_(val2, val1[:len(val2)])
        # And that the added message is what we expect.
        assert 'Preferences.get' in val1[-1]
    def test_profile_filenames(self):
        """
        Test that references to critical files in the user profile cause
        warnings.
        """
        warnings = [
            {'id': ('testcases_regex', 'string', 'profile_filenames'),
             'message': 'Reference to critical user profile data',
             'signing_severity': 'low'}]
        def fail(script):
            self.setUp()
            self.run_script(script)
            self.assert_failed(with_warnings=warnings)
        paths = (r'addons.json',
                 r'safebrowsing',
                 r'safebrowsing\\foo.bar',
                 r'safebrowsing/foo.bar')
        patterns = ("'%s'",
                    "'/%s'",
                    "'\\%s'")
        for path in paths:
            for pattern in patterns:
                yield fail, pattern % path
        # Concatenated literals must be caught as well.
        yield fail, "'addons' + '.json'"
    def test_categories(self):
        """Tests that dangerous category names are flagged in JS strings."""
        warning = {'id': ('testcases_chromemanifest', 'test_resourcemodules',
                          'resource_modules'),
                   'message': 'Potentially dangerous category entry',
                   'signing_severity': 'medium',
                   'editors_only': True}
        self.run_script("'JavaScript-global-property'")
        self.assert_failed(with_warnings=[warning])
    def test_proxy_filter(self):
        """Tests that registering a proxy filter generates a warning."""
        warning = {'id': ('testcases_javascript_actions',
                          'predefinedentities', 'proxy_filter'),
                   'signing_severity': 'low',
                   'editors_only': True}
        self.run_script("""
            Cc[""].getService(Ci.nsIProtocolProxyService)
                  .registerFilter(foo, 0);
        """)
        self.assert_failed(with_warnings=[warning])
    def test_addon_install(self):
        """Tests attempts to install an add-on are flagged."""
        warning = {'id': ('js', 'traverser', 'dangerous_global'),
                   'editors_only': True,
                   'signing_severity': 'high'}
        def test(method):
            self.setUp()
            self.run_script("""
                AddonManager.%s(location, callback, plus, some, other, stuff);
            """ % method)
            self.assert_failed(with_warnings=[warning])
        for method in (u'getInstallForFile',
                       u'getInstallForURL'):
            yield test, method
    def test_addon_settings(self):
        """Tests that attempts to change add-on settings via the
        AddonManager API are flagged."""
        warning = {
            'description':
                'Changing this preference may have severe security '
                'implications, and is forbidden under most circumstances.',
            'editors_only': True,
            'signing_severity': 'high'}
        props = (u'autoUpdateDefault',
                 u'checkUpdateSecurity',
                 u'checkUpdateSecurityDefault',
                 u'updateEnabled')
        def test(prop):
            self.setUp()
            self.run_script('AddonManager.%s = false;' % prop)
            self.assert_failed(with_warnings=[warning])
        for prop in props:
            yield test, prop
    def test_ctypes(self):
        """Tests that usage of `ctypes` generates warnings."""
        self.run_script("""
            ctypes.open("libc.so.6");
        """)
        self.assert_failed(with_warnings=[
            {'id': ('js', 'traverser', 'dangerous_global'),
             'editors_only': True,
             'signing_severity': 'high'}])
    def test_nsIProcess(self):
        """Tests that usage of `nsIProcess` generates warnings."""
        self.run_script("""
            Cc[""].createInstance(Ci.nsIProcess);
        """)
        self.assert_failed(with_warnings=[
            {'id': ('js', 'traverser', 'dangerous_global'),
             'editors_only': True,
             'signing_severity': 'high'}])
    def test_eval(self):
        """Tests that usage of eval-related functions generates warnings."""
        functions = ('eval',
                     'Function',
                     'setTimeout',
                     'setInterval')
        warning = {'id': ('javascript', 'dangerous_global', 'eval'),
                   'signing_severity': 'high'}
        def test(func):
            self.setUp()
            self.run_script("%s('doEvilStuff()')" % func)
            self.assert_failed(with_warnings=[warning])
        for func in functions:
            yield test, func
    def test_cert_service(self):
        """Tests that changes to certificate trust leads to warnings."""
        interfaces = ('nsIX509CertDB',
                      'nsIX509CertDB2',
                      'nsIX509CertList',
                      'nsICertOverrideService')
        contracts = ('@mozilla.org/security/x509certdb;1',
                     '@mozilla.org/security/x509certlist;1',
                     '@mozilla.org/security/certoverride;1')
        warning = {'id': ('javascript', 'predefinedentities', 'cert_db'),
                   'editors_only': True,
                   'signing_severity': 'high'}
        def fail(script):
            self.setUp()
            self.run_script(script)
            self.assert_failed(with_warnings=[warning])
        # Both the interface and the contract-id access routes are flagged.
        for interface in interfaces:
            yield fail, "Cc[''].getService(Ci.%s)" % interface
        for contract in contracts:
            yield fail, "Cc['%s'].getService()" % contract
    def test_new_tab_page(self):
        """Tests that attempts to replace about:newtab are flagged."""
        patterns = (
            "if (foo == 'about:newtab') doStuff();",
            'if (bar === "about:blank") doStuff();',
            "if (baz==='about:newtab') doStuff();",
            "if ('about:newtab' == thing) doStuff();",
            '/^about:newtab$/.test(thing)',
            '/about:newtab/.test(thing)',
            "'@mozilla.org/network/protocol/about;1?what=newtab'")
        warning = {'signing_severity': 'low'}
        def fail(script):
            self.setUp()
            self.run_js_regex(script)
            self.assert_failed(with_warnings=[warning])
        for pattern in patterns:
            yield fail, pattern
    def test_script_creation(self):
        """Tests that creation of script tags generates warnings."""
        warning = {'id': ('testcases_javascript_instanceactions',
                          '_call_expression', 'called_createelement'),
                   'signing_severity': 'medium'}
        self.run_script("""
            doc.createElement("script");
        """)
        self.assert_failed(with_warnings=[warning])
    def test_event_attributes(self):
        """Tests that creation of event handler attributes is flagged."""
        warning = {'id': ('testcases_javascript_instanceactions',
                          'setAttribute', 'setting_on*'),
                   'signing_severity': 'medium'}
        self.run_script("""
            elem.setAttribute("onhover", "doStuff();" + with_stuff);
        """)
        self.assert_failed(with_warnings=[warning])
    def test_event_attributes_innerhtml(self):
        """Tests that creation of event handler attributes via innerHTML
        assignment is flagged."""
        warning = {'id': ('testcases_javascript_instancetypes',
                          'set_innerHTML', 'event_assignment'),
                   'signing_severity': 'medium'}
        self.run_script("""
            elem.innerHTML = '<a onhover="doEvilStuff()"></a>';
        """)
        self.assert_failed(with_warnings=[warning])
    def test_contentScript_dynamic_values(self):
        """Tests that dynamic values passed as contentScript properties
        trigger signing warnings."""
        warning = {'id': ('testcases_javascript_instanceproperties',
                          'contentScript', 'set_non_literal'),
                   'signing_severity': 'high'}
        self.run_script("""
            tab.attach({ contentScript: evil })
        """)
        self.assert_failed(with_warnings=[warning])
    def test_contentScript_static_values(self):
        """Tests that static, verifiable values passed as contentScripts
        trigger no warnings, but unsafe static values do."""
        # Test safe value.
        self.run_script("""
            tab.attach({ contentScript: "everythingIsCool()" })
        """)
        self.assert_silent()
        # Test unsafe value.
        warning = {'id': ('testcases_javascript_instanceactions',
                          '_call_expression', 'called_createelement'),
                   'signing_severity': 'medium'}
        self.setUp()
        self.run_script("""
            tab.attach({ contentScript: 'doc.createElement("script")' });
        """)
        self.assert_failed(with_warnings=[warning])
| |
import pytest
import numpy as np
import tensorflow as tf
from tfsnippet.utils import *
class IntShapeTestCase(tf.test.TestCase):
    """Tests for get_static_shape."""

    def test_int_shape(self):
        # A fully defined shape comes back as a plain tuple of ints.
        fully_static = tf.zeros([1, 2, 3])
        self.assertEqual(get_static_shape(fully_static), (1, 2, 3))
        # Unknown dimensions show up as None entries.
        partially_static = tf.placeholder(tf.float32, [None, 2, 3])
        self.assertEqual(get_static_shape(partially_static), (None, 2, 3))
        # A completely unknown shape yields None.
        fully_dynamic = tf.placeholder(tf.float32, None)
        self.assertIsNone(get_static_shape(fully_dynamic))
class ResolveNegativeAxisTestCase(tf.test.TestCase):
    """Tests for resolve_negative_axis."""

    def test_resolve_negative_axis(self):
        # Non-negative axes pass through unchanged; negative ones are
        # shifted by the number of dimensions.
        self.assertEqual(resolve_negative_axis(4, (0, 1, 2)), (0, 1, 2))
        self.assertEqual(resolve_negative_axis(4, (0, -1, -2)), (0, 3, 2))
        # An axis outside the valid range must be rejected.
        with pytest.raises(ValueError, match='`axis` out of range: \\(-5,\\) '
                                             'vs ndims 4.'):
            _ = resolve_negative_axis(4, (-5,))
        # Axes that collide after resolution must be rejected as well.
        with pytest.raises(ValueError, match='`axis` has duplicated elements '
                                             'after resolving negative axis.'):
            _ = resolve_negative_axis(4, (0, -4))
class GetBatchSizeTestCase(tf.test.TestCase):
    def test_get_batch_size(self):
        def run_check(sess, x, axis, x_in=None, dynamic=True):
            # Without an explicit placeholder, feed `x` as a constant and
            # expect a static (plain int) batch size.
            if x_in is None:
                x_in = tf.constant(x)
                dynamic = False
            batch_size = get_batch_size(x_in, axis)
            if dynamic:
                # Dynamic batch axis: the result must be a tensor that
                # evaluates to the numpy shape entry.
                self.assertIsInstance(batch_size, tf.Tensor)
                self.assertEqual(sess.run(batch_size, feed_dict={x_in: x}),
                                 x.shape[axis])
            else:
                self.assertEqual(batch_size, x.shape[axis])
        with self.test_session() as sess:
            x = np.zeros([2, 3, 4], dtype=np.float32)
            # check when shape is totally static
            run_check(sess, x, 0)
            run_check(sess, x, 1)
            run_check(sess, x, 2)
            run_check(sess, x, -1)
            # check when some shape is dynamic, but the batch axis is not
            run_check(sess, x, 0, tf.placeholder(tf.float32, [2, None, None]),
                      dynamic=False)
            run_check(sess, x, 1, tf.placeholder(tf.float32, [None, 3, None]),
                      dynamic=False)
            run_check(sess, x, 2, tf.placeholder(tf.float32, [None, None, 4]),
                      dynamic=False)
            run_check(sess, x, -1, tf.placeholder(tf.float32, [None, None, 4]),
                      dynamic=False)
            # check when the batch axis is dynamic
            run_check(sess, x, 0, tf.placeholder(tf.float32, [None, 3, 4]),
                      dynamic=True)
            run_check(sess, x, 1, tf.placeholder(tf.float32, [2, None, 4]),
                      dynamic=True)
            run_check(sess, x, 2, tf.placeholder(tf.float32, [2, 3, None]),
                      dynamic=True)
            run_check(sess, x, -1, tf.placeholder(tf.float32, [2, 3, None]),
                      dynamic=True)
            # check when the shape is totally dynamic
            x_in = tf.placeholder(tf.float32, None)
            run_check(sess, x, 0, x_in, dynamic=True)
            run_check(sess, x, 1, x_in, dynamic=True)
            run_check(sess, x, 2, x_in, dynamic=True)
            run_check(sess, x, -1, x_in, dynamic=True)
class GetRankTestCase(tf.test.TestCase):
    """Tests for get_rank."""

    def test_get_rank(self):
        with self.test_session() as sess:
            # Fully and partially defined static shapes both give an
            # int rank without evaluation.
            for static_shape in [(1, 2, 3), (1, None, 3)]:
                ph = tf.placeholder(tf.float32, static_shape)
                self.assertEqual(get_rank(ph), 3)
            # With a totally unknown shape the rank becomes a tensor
            # that must be evaluated with a feed.
            ph = tf.placeholder(tf.float32, None)
            feed = np.arange(6, dtype=np.float32).reshape((1, 2, 3))
            self.assertEqual(
                sess.run(get_rank(ph), feed_dict={ph: feed}), 3)
class GetDimensionSizeTestCase(tf.test.TestCase):
    def test_get_dimension_size(self):
        with self.test_session() as sess:
            # test static shape: every queried size is a plain int
            ph = tf.placeholder(tf.float32, (1, 2, 3))
            self.assertEqual(get_dimension_size(ph, 0), 1)
            self.assertEqual(get_dimension_size(ph, 1), 2)
            self.assertEqual(get_dimension_size(ph, 2), 3)
            self.assertEqual(get_dimension_size(ph, -1), 3)
            # test dynamic shape, but no dynamic axis is queried
            ph = tf.placeholder(tf.float32, (1, None, 3))
            self.assertEqual(get_dimension_size(ph, 0), 1)
            self.assertEqual(get_dimension_size(ph, 2), 3)
            self.assertEqual(get_dimension_size(ph, -1), 3)
            # test dynamic shape: querying the None axis yields a tensor
            def _assert_equal(a, b):
                self.assertIsInstance(a, tf.Tensor)
                self.assertEqual(sess.run(a, feed_dict={ph: ph_in}), b)
            ph = tf.placeholder(tf.float32, (1, None, 3))
            ph_in = np.arange(6, dtype=np.float32).reshape((1, 2, 3))
            _assert_equal(get_dimension_size(ph, 1), 2)
            _assert_equal(get_dimension_size(ph, -2), 2)
            # a tensor-valued axis also forces a dynamic (tensor) result
            axis_ph = tf.placeholder(tf.int32, None)
            self.assertEqual(
                sess.run(get_dimension_size(ph, axis_ph),
                         feed_dict={ph: ph_in, axis_ph: 1}),
                2
            )
            # test fully dynamic shape
            ph = tf.placeholder(tf.float32, None)
            _assert_equal(get_dimension_size(ph, 0), 1)
            _assert_equal(get_dimension_size(ph, 1), 2)
            _assert_equal(get_dimension_size(ph, 2), 3)
            _assert_equal(get_dimension_size(ph, -2), 2)
    def test_get_dimensions_size(self):
        with self.test_session() as sess:
            # test empty query
            ph = tf.placeholder(tf.float32, None)
            self.assertTupleEqual(get_dimensions_size(ph, ()), ())
            # test static shape: selected axes come back as a tuple,
            # in query order
            ph = tf.placeholder(tf.float32, (1, 2, 3))
            self.assertTupleEqual(get_dimensions_size(ph), (1, 2, 3))
            self.assertTupleEqual(get_dimensions_size(ph, [0]), (1,))
            self.assertTupleEqual(get_dimensions_size(ph, [1]), (2,))
            self.assertTupleEqual(get_dimensions_size(ph, [2]), (3,))
            self.assertTupleEqual(get_dimensions_size(ph, [2, 0, 1]), (3, 1, 2))
            # test dynamic shape, but no dynamic axis is queried
            ph = tf.placeholder(tf.float32, (1, None, 3))
            self.assertTupleEqual(get_dimensions_size(ph, [0]), (1,))
            self.assertTupleEqual(get_dimensions_size(ph, [2]), (3,))
            self.assertTupleEqual(get_dimensions_size(ph, [2, 0]), (3, 1))
            # test dynamic shape: result becomes a tensor
            def _assert_equal(a, b):
                ph_in = np.arange(6, dtype=np.float32).reshape((1, 2, 3))
                self.assertIsInstance(a, tf.Tensor)
                np.testing.assert_equal(sess.run(a, feed_dict={ph: ph_in}), b)
            ph = tf.placeholder(tf.float32, (1, None, 3))
            _assert_equal(get_dimensions_size(ph), (1, 2, 3))
            _assert_equal(get_dimensions_size(ph, [1]), (2,))
            _assert_equal(get_dimensions_size(ph, [2, 0, 1]), (3, 1, 2))
            # test fully dynamic shape
            ph = tf.placeholder(tf.float32, None)
            _assert_equal(get_dimensions_size(ph), (1, 2, 3))
            _assert_equal(get_dimensions_size(ph, [0]), (1,))
            _assert_equal(get_dimensions_size(ph, [1]), (2,))
            _assert_equal(get_dimensions_size(ph, [2]), (3,))
            _assert_equal(get_dimensions_size(ph, [2, 0, 1]), (3, 1, 2))
    def test_get_shape(self):
        with self.test_session() as sess:
            # test static shape: a plain tuple is returned
            ph = tf.placeholder(tf.float32, (1, 2, 3))
            self.assertTupleEqual(get_shape(ph), (1, 2, 3))
            # test dynamic shape: the shape is a tensor to be evaluated
            def _assert_equal(a, b):
                ph_in = np.arange(6, dtype=np.float32).reshape((1, 2, 3))
                self.assertIsInstance(a, tf.Tensor)
                np.testing.assert_equal(sess.run(a, feed_dict={ph: ph_in}), b)
            ph = tf.placeholder(tf.float32, (1, None, 3))
            _assert_equal(get_shape(ph), (1, 2, 3))
            # test fully dynamic shape
            ph = tf.placeholder(tf.float32, None)
            _assert_equal(get_shape(ph), (1, 2, 3))
class ConcatShapesTestCase(tf.test.TestCase):
    def test_concat_shapes(self):
        with self.test_session() as sess:
            # test empty
            self.assertTupleEqual(concat_shapes(()), ())
            # test static shapes: any iterable of shape tuples is
            # flattened into a single tuple
            self.assertTupleEqual(
                concat_shapes(iter([
                    (1, 2),
                    (3,),
                    (),
                    (4, 5)
                ])),
                (1, 2, 3, 4, 5)
            )
            # test having dynamic shape: a single tensor segment makes
            # the whole result a tensor
            shape = concat_shapes([
                (1, 2),
                tf.constant([3], dtype=tf.int32),
                (),
                tf.constant([4, 5], dtype=tf.int32),
            ])
            self.assertIsInstance(shape, tf.Tensor)
            np.testing.assert_equal(sess.run(shape), (1, 2, 3, 4, 5))
class IsShapeEqualTestCase(tf.test.TestCase):
    def test_is_shape_equal(self):
        def check(x, y, x_ph=None, y_ph=None):
            # Ground truth comes straight from the numpy arrays' shapes.
            ans = x.shape == y.shape
            feed_dict = {}
            if x_ph is not None:
                feed_dict[x_ph] = x
                x = x_ph
            if y_ph is not None:
                feed_dict[y_ph] = y
                y = y_ph
            result = is_shape_equal(x, y)
            if is_tensor_object(result):
                # Dynamic comparison: evaluate the boolean tensor.
                result = sess.run(result, feed_dict=feed_dict)
            self.assertEqual(result, ans)
        with self.test_session() as sess:
            # check static shapes
            x1 = np.random.normal(size=[2, 3, 4])
            x2 = np.random.normal(size=[2, 1, 4])
            x3 = np.random.normal(size=[1, 2, 3, 4])
            check(x1, np.copy(x1))
            check(x1, x2)
            check(x1, x3)
            # check partial dynamic shapes
            x1_ph = tf.placeholder(dtype=tf.float32, shape=[2, None, 4])
            x2_ph = tf.placeholder(dtype=tf.float32, shape=[2, None, 4])
            x3_ph = tf.placeholder(dtype=tf.float32, shape=[None] * 4)
            check(x1, np.copy(x1), x1_ph, x2_ph)
            check(x1, x2, x1_ph, x2_ph)
            check(x1, x3, x1_ph, x3_ph)
            # check fully dynamic shapes
            x1_ph = tf.placeholder(dtype=tf.float32, shape=None)
            x2_ph = tf.placeholder(dtype=tf.float32, shape=None)
            x3_ph = tf.placeholder(dtype=tf.float32, shape=None)
            check(x1, np.copy(x1), x1_ph, x2_ph)
            check(x1, x2, x1_ph, x2_ph)
            check(x1, x3, x1_ph, x3_ph)
| |
from __future__ import print_function
import os
import click
import json
import logging
import functools
import sys
import time
import girder_client
import paramiko
from contextlib import contextmanager
from cumulus.scripts.command import (cli, pass_proxy,
get_aws_instance_info,
get_aws_volume_info)
from cumulus.scripts.command import (profile,
cluster,
create_profile,
create_cluster,
launch_cluster,
create_volume,
attach_volume,
detach_volume,
delete_volume,
terminate_cluster,
delete_cluster,
delete_profile)
# Names of test cases that raised an AssertionError; filled in by the
# ``test_case`` decorator and read by ``report``.
test_failures = []


def report():
    """Print a pass/fail summary and exit with a matching status code.

    Exits with status 0 when no test case recorded a failure; otherwise
    lists the failing test names and exits with status 1.
    """
    # Idiomatic emptiness check instead of len(...) == 0.
    if not test_failures:
        print('\nAll tests passed')
        sys.exit(0)
    print('\nTest failures present!')
    for test in test_failures:
        print('  {}'.format(test))
    sys.exit(1)
def test_case(func):
    """Decorator turning a click command callback into a smoke-test case.

    Wraps *func* so an AssertionError is caught and recorded in the
    module-level ``test_failures`` list instead of propagating, printing
    progress as it goes.  Also applies ``click.pass_context`` and
    ``pass_proxy``, so the wrapped callback receives ``(ctx, proxy, ...)``.
    """
    @functools.wraps(func)
    def _catch_exceptions(*args, **kwargs):
        try:
            # pass_context/pass_proxy prepend these two positional args.
            ctx, proxy = args[0], args[1]
            if proxy.verbose >= 1:
                print('%s...' % func.__name__)
                sys.stdout.flush()
            func(*args, **kwargs)
            if proxy.verbose >= 1:
                print('%s...OK' % func.__name__)
            else:
                # Quiet mode: dot-per-test progress, like unittest.
                print('.', end='')
        except AssertionError as e:
            # Record the failure; report() exits non-zero if any exist.
            test_failures.append(func.__name__)
            if proxy.verbose >= 1:
                import traceback
                print('ERROR')
                traceback.print_exc()
            else:
                print('E', end='')
        sys.stdout.flush()
    _catch_exceptions = click.pass_context(_catch_exceptions)
    _catch_exceptions = pass_proxy(_catch_exceptions)
    return _catch_exceptions
@contextmanager
def clean_proxy(proxy):
    """Context manager that removes the cached ``_volume``, ``_cluster``
    and ``_profile`` attributes from *proxy* (when present) before
    yielding."""
    for cached_attr in ('_volume', '_cluster', '_profile'):
        if hasattr(proxy, cached_attr):
            delattr(proxy, cached_attr)
    yield
def invoke_with_clean_proxy(ctx, proxy):
    """Return a helper that invokes a click command through ``ctx.invoke``
    inside a ``clean_proxy(proxy)`` context."""
    def _invoke(command, **kwargs):
        with clean_proxy(proxy):
            ctx.invoke(command, **kwargs)
    return _invoke
@cli.command()
@click.option('--profile_section', default='profile')
@test_case
def test_profile(ctx, proxy, profile_section):
    """Test profile creation/deletion."""
    proxy.profile_section = profile_section
    num_profiles = len(proxy.profiles)
    invoke = invoke_with_clean_proxy(ctx, proxy)
    invoke(create_profile)
    # Creation should grow the profile count by exactly one.
    assert len(proxy.profiles) == num_profiles + 1, \
        'After create_profile only one profile should exist'
    invoke(delete_profile)
    # Deletion should bring us back to the starting count.
    assert len(proxy.profiles) == num_profiles, \
        'After delete_profile no profiles should exist'
@cli.command()
@click.option('--profile_section', default='profile')
@click.option('--cluster_section', default='cluster')
@test_case
def test_cluster(ctx, proxy, profile_section, cluster_section):
    """Test cluster creation/deletion."""
    num_clusters = len(proxy.clusters)
    proxy.profile_section = profile_section
    proxy.cluster_section = cluster_section
    invoke = invoke_with_clean_proxy(ctx, proxy)

    invoke(create_profile)
    invoke(create_cluster)
    # BUG FIX: the failure messages previously said "profile(s)" even
    # though these assertions check cluster counts.
    assert len(proxy.clusters) == num_clusters + 1, \
        'After create_cluster only one cluster should exist'

    invoke(delete_cluster)
    invoke(delete_profile)
    assert len(proxy.clusters) == num_clusters, \
        'After delete_cluster no clusters should exist'
def get_instance_hash(proxy):
    """Return the current AWS instance info keyed by instance ID."""
    headers, data = get_aws_instance_info(proxy)
    rows = (dict(zip(headers, row)) for row in data)
    return dict((row['ID'], row) for row in rows)
@cli.command()
@click.option('--profile_section', default='profile')
@click.option('--cluster_section', default='cluster')
@test_case
def test_launch_cluster(ctx, proxy, profile_section, cluster_section):
    """Test launching/terminating a cluster."""
    proxy.profile_section = profile_section
    proxy.cluster_section = cluster_section
    invoke = invoke_with_clean_proxy(ctx, proxy)

    # Snapshot AWS instances so the launched ones can be identified by
    # set difference below.
    begin = get_instance_hash(proxy)

    invoke(create_profile)
    invoke(create_cluster)
    invoke(launch_cluster)

    middle = get_instance_hash(proxy)
    instance_ids = set(middle.keys()) - set(begin.keys())
    assert len(instance_ids) == 2, \
        'Two instances should have been created'

    for instance in [middle[i] for i in instance_ids]:
        assert instance['State'] == 'running', \
            'Instance {} is not running'.format(instance['ID'])
        assert instance['Type'] == 't2.nano', \
            'Instance {} is not a t2.nano instance'.format(instance['ID'])

    invoke(terminate_cluster)
    invoke(delete_cluster)
    invoke(delete_profile)

    end = get_instance_hash(proxy)
    for instance in [end[i] for i in instance_ids]:
        # BUG FIX: the message previously said "is not running" although
        # this assertion checks for the terminated state.
        assert instance['State'] == 'terminated', \
            'Instance {} is not terminated'.format(instance['ID'])
def get_volume_hash(proxy):
    """Return the current AWS volume info keyed by volume ID."""
    headers, data = get_aws_volume_info(proxy)
    rows = (dict(zip(headers, row)) for row in data)
    return dict((row['Volume ID'], row) for row in rows)
@cli.command()
@click.option('--profile_section', default='profile')
@click.option('--cluster_section', default='cluster')
@click.option('--volume_section', default='volume')
@test_case
def test_volume(ctx, proxy, profile_section, cluster_section, volume_section):
    """Test attaching/detaching/deleting a volume"""
    proxy.profile_section = profile_section
    proxy.cluster_section = cluster_section
    proxy.volume_section = volume_section
    invoke = invoke_with_clean_proxy(ctx, proxy)
    # A volume can only be attached to a running cluster, so stand one up.
    invoke(create_profile)
    invoke(create_cluster)
    invoke(launch_cluster)
    # Snapshot AWS volume state before creating anything so the new volume
    # can be identified by set difference below.
    begin = get_volume_hash(proxy)
    invoke(create_volume)
    invoke(attach_volume)
    after_attach = get_volume_hash(proxy)
    vol_ids = set(after_attach.keys()) - set(begin.keys())
    assert len(vol_ids) == 1, \
        'Should have found only one volume'
    vol_id = vol_ids.pop()
    # Remote (AWS) state after attach: in use, with the configured size.
    # NOTE(review): the expected size 12 presumably matches the 'volume'
    # config section used by create_volume — confirm against the config.
    assert after_attach[vol_id]['State'] == 'in-use'
    assert after_attach[vol_id]['Size'] == 12
    girder_vol = proxy.volumes[0]
    girder_cluster = proxy.clusters[0]
    # local girder status is 'in-use' (attached)
    assert girder_vol['status'] == 'in-use'
    # volume has right aws id
    assert vol_id in [v['ec2']['id'] for v in proxy.volumes]
    # cluster has knowledge of girder volume
    assert girder_vol['_id'] in girder_cluster['volumes']
    invoke(detach_volume)
    after_detach = get_volume_hash(proxy)
    # Remote state is 'available'
    assert after_detach[vol_id]['State'] == 'available'
    # Re-read the girder objects; detach should have updated both sides.
    girder_vol = proxy.volumes[0]
    girder_cluster = proxy.clusters[0]
    # local girder status is available
    assert girder_vol['status'] == 'available'
    # volume has been removed from local girder cluster's volume list
    assert girder_vol['_id'] not in girder_cluster['volumes']
    invoke(delete_volume)
    after = get_volume_hash(proxy)
    # Removed out on AWS
    assert vol_id not in after.keys()
    # Removed locally
    assert len(proxy.volumes) == 0
    # Tear down the cluster/profile created for this test.
    invoke(terminate_cluster)
    invoke(delete_cluster)
    invoke(delete_profile)
###############################################################################
# Taskflow Integration tests
#
def create_taskflow(proxy, cls_name):
    """Create a taskflow of class *cls_name* and return its girder id."""
    body = json.dumps({'taskFlowClass': cls_name})
    response = proxy.post('taskflows', data=body)
    return response['_id']
def wait_for_taskflow_status(proxy, taskflow_id, state, timeout=60):
    """Block until the taskflow reaches *state* or *timeout* seconds pass.

    While polling, the number of tasks currently in the flow is logged as
    a progress indicator.
    """
    tasks_url = 'taskflows/%s/tasks' % (taskflow_id)

    def _log_task_count(status_response):
        # Best effort only -- the taskflow may be briefly unreadable.
        try:
            tasks = proxy.get(tasks_url)
            logging.info('Tasks in flow: %d' % len(tasks))
        except girder_client.HttpError:
            pass

    proxy.wait_for_status(
        'taskflows/%s/status' % (taskflow_id), state,
        timeout=timeout,
        log_url='taskflows/%s/log' % (taskflow_id),
        callback=_log_task_count)
@cli.command()
@test_case
def test_simple_taskflow(ctx, proxy):
    """Test a simple taskflow"""
    logging.info('Running simple taskflow ...')
    flow_id = create_taskflow(
        proxy, 'cumulus.taskflow.core.test.mytaskflows.SimpleTaskFlow')
    # Kick off the flow, then block until it reports completion.
    proxy.put('taskflows/%s/start' % (flow_id))
    wait_for_taskflow_status(proxy, flow_id, 'complete')
@cli.command()
@test_case
def test_linked_taskflow(ctx, proxy):
    """Test a linked taskflow"""
    logging.info('Running linked taskflow ...')
    flow_id = create_taskflow(
        proxy, 'cumulus.taskflow.core.test.mytaskflows.LinkTaskFlow')
    # Kick off the flow, then block until it reports completion.
    proxy.put('taskflows/%s/start' % (flow_id))
    wait_for_taskflow_status(proxy, flow_id, 'complete')
@cli.command()
@test_case
def test_terminate_taskflow(ctx, proxy):
    """Test terminating a taskflow"""
    logging.info('Running simple taskflow ...')
    flow_id = create_taskflow(
        proxy, 'cumulus.taskflow.core.test.mytaskflows.SimpleTaskFlow')

    # Start the flow and give it a moment to spin up some tasks.
    proxy.put('taskflows/%s/start' % (flow_id))
    time.sleep(4)

    logging.info('Terminate the taskflow')
    proxy.put('taskflows/%s/terminate' % (flow_id))
    wait_for_taskflow_status(proxy, flow_id, 'terminated')

    # Deletion is asynchronous; the server answers 202 while it works.
    logging.info('Delete the taskflow')
    try:
        proxy.delete('taskflows/%s' % (flow_id))
    except girder_client.HttpError as ex:
        if ex.status != 202:
            raise

    # Once fully deleted, the status endpoint may 400; tolerate that.
    try:
        wait_for_taskflow_status(proxy, flow_id, 'deleted')
    except girder_client.HttpError as ex:
        if ex.status != 400:
            raise
@cli.command()
@test_case
def test_chord_taskflow(ctx, proxy):
    """Test running a taskflow with a chord"""
    logging.info('Running taskflow containing a chord ...')
    flow_id = create_taskflow(
        proxy, 'cumulus.taskflow.core.test.mytaskflows.ChordTaskFlow')
    # Kick off the flow, then block until it reports completion.
    proxy.put('taskflows/%s/start' % (flow_id))
    wait_for_taskflow_status(proxy, flow_id, 'complete')
@cli.command()
@test_case
def test_connected_taskflow(ctx, proxy):
    """Test a connected taskflow"""
    # Exercises a workflow made of two flows connected together.
    logging.info('Running taskflow that connects to parts together ...')
    flow_id = create_taskflow(
        proxy, 'cumulus.taskflow.core.test.mytaskflows.ConnectTwoTaskFlow')
    # Kick off the flow, then block until it reports completion.
    proxy.put('taskflows/%s/start' % (flow_id))
    wait_for_taskflow_status(proxy, flow_id, 'complete')
@cli.command()
@test_case
def test_taskflow(ctx, proxy):
    """Run all taskflow tests"""
    ctx.invoke(test_simple_taskflow)
    ctx.invoke(test_linked_taskflow)
    ctx.invoke(test_terminate_taskflow)
    ctx.invoke(test_chord_taskflow)
    ctx.invoke(test_connected_taskflow)
    # TODO: MyCompositeTaskFlow is not exercised here; add a composite
    # taskflow test case once the composite flow is supported (the old
    # commented-out driver code for it was removed).
###############################################################################
# Traditional Cluster Test
#
@cli.command()
@click.option('--profile_section', default='profile')
@click.option('--cluster_section', default='traditional')
@click.option('--host', default=None)
@click.option('--port', default=None)
@test_case
def test_traditional(ctx, proxy, profile_section, cluster_section, host, port):
    """Test creating a traditional cluster.

    Creates a traditional (non-EC2) cluster, installs the cluster's public
    key on the target machine over SSH, starts the cluster, then runs a
    small job that stages an input file, executes a script, and collects
    the output folder contents through girder.
    """
    from StringIO import StringIO
    logging.info('Starting traditional cluster test...')
    proxy.cluster_section = cluster_section
    proxy.profile_section = profile_section
    invoke = invoke_with_clean_proxy(ctx, proxy)
    # Allow the target machine to be overridden from the command line.
    if host is not None:
        proxy.cluster_host = host
    if port is not None:
        proxy.cluster_port = port
    invoke(create_profile)
    invoke(create_cluster)
    proxy.wait_for_status('clusters/%s/status' % proxy.cluster['_id'],
                          'created', timeout=60)
    # Re-request the cluster so we pick up the key generated on creation.
    proxy._cluster = None
    # Check cluster has key location
    assert 'config' in proxy.cluster
    assert 'ssh' in proxy.cluster['config']
    assert 'publicKey' in proxy.cluster['config']['ssh']
    key = proxy.cluster['config']['ssh']['publicKey']
    # Create SSH Client
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.load_system_host_keys()
    client.connect(proxy.cluster_host,
                   username=proxy.cluster_user, look_for_keys=True)
    # Authorize the cluster's key on the target machine; the command should
    # produce no output on either stream.
    _, stdout, stderr = client.exec_command(
        'echo "%s" >> ~/.ssh/authorized_keys' % key)
    # BUG FIX (idiom): was 'assert bool(...) == False'; 'assert not ...'
    # is the equivalent, idiomatic emptiness check.
    assert not stdout.read()
    assert not stderr.read()
    proxy.put('clusters/%s/start' % proxy.cluster['_id'])
    proxy.wait_for_status('clusters/%s/status' % proxy.cluster['_id'],
                          'running', timeout=60)
    # Create Script
    commands = ['sleep 10', 'cat CumulusIntegrationTestInput']
    r = proxy.post('scripts', data=json.dumps({
        'commands': commands,
        'name': 'CumulusIntegrationTestLob'
    }))
    script_id = r['_id']
    # Create Input
    data = 'Need more input!'
    input_folder_id = proxy.get_folder_id('Private/CumulusInput')
    # Create the input item
    proxy.client.uploadFile(
        input_folder_id, StringIO(data), 'CumulusIntegrationTestInput',
        len(data), parentType='folder')
    # Create Output Folder
    output_folder_id = proxy.get_folder_id('Private/CumulusOutput')
    # Create Job
    job = proxy.client.post('jobs', data=json.dumps({
        'name': 'CumulusIntegrationTestJob',
        'scriptId': script_id,
        'output': [{
            'folderId': output_folder_id,
            'path': '.'
        }],
        'input': [{
            'folderId': input_folder_id,
            'path': '.'
        }]
    }))
    # Submit Job
    proxy.client.put('clusters/%s/job/%s/submit' %
                     (proxy.cluster['_id'], job['_id']))
    proxy.wait_for_status('jobs/%s' % job['_id'],
                          'complete', timeout=60,
                          log_url='jobs/%s/log' % job['_id'])
    # Assert output
    # NOTE(review): 4 is presumably the expected number of items produced
    # in the output folder by this job -- confirm against the job script.
    r = proxy.client.listItem(output_folder_id)
    assert len(r) == 4
    # Clean up
    invoke(delete_cluster)
    invoke(delete_profile)
if __name__ == '__main__':
    # cli() normally exits the process via SystemExit; suppress it so the
    # aggregated pass/fail summary below always runs and report() decides
    # the final exit status instead.
    try:
        cli()
    except SystemExit:
        pass
    report()
| |
import importlib
import os
import subprocess
import time
import re
import pprint
import sys
import traceback
class FrameworkTest:
    """A single framework's benchmark test.

    Instances are built by ``parse_config`` from a framework's benchmark
    config; every key of the config entry becomes an instance attribute
    (``json_url``, ``port``, ``setup_file``, ...).  The class drives URL
    verification (curl), benchmarking (wrk over ssh) and parsing of the
    resulting output files.
    """
    ##########################################################################
    # Class variables
    ##########################################################################
    # Header strings handed to wrk; {accept} is filled in per test type.
    headers_template = "-H 'Host: localhost' -H '{accept}' -H 'Connection: keep-alive'"
    headers_full_template = "-H 'Host: localhost' -H '{accept}' -H 'Accept-Language: en-US,en;q=0.5' -H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) Gecko/20130501 Firefox/30.0 AppleWebKit/600.00 Chrome/30.0.0000.0 Trident/10.0 Safari/600.00' -H 'Cookie: uid=12345678901234567890; __utma=1.1234567890.1234567890.1234567890.1234567890.12; wd=2560x1600' -H 'Connection: keep-alive'"

    accept_json = "Accept: application/json,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"
    accept_html = "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
    accept_plaintext = "Accept: text/plain,text/html;q=0.9,application/xhtml+xml;q=0.9,application/xml;q=0.8,*/*;q=0.7"

    # Bash script template for variable-concurrency benchmarks: a short
    # primer run, a warmup run, then one wrk run per concurrency level.
    concurrency_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " {wrk} {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}"
    sleep 5
    for c in {interval}
    do
    echo ""
    echo "---------------------------------------------------------"
    echo " Concurrency: $c for {name}"
    echo " {wrk} {headers} {pipeline} -d {duration} -c $c -t $(($c>{max_threads}?{max_threads}:$c)) \"http://{server_host}:{port}{url}\""
    echo "---------------------------------------------------------"
    echo ""
    {wrk} {headers} {pipeline} -d {duration} -c "$c" -t "$(($c>{max_threads}?{max_threads}:$c))" http://{server_host}:{port}{url}
    sleep 2
    done
    """
    # Bash script template for variable-query benchmarks: same shape as
    # above but the loop variable is appended to the URL as a query count.
    query_template = """
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Primer {name}"
    echo " wrk {headers} -d 5 -c 8 -t 8 \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d 5 -c 8 -t 8 "http://{server_host}:{port}{url}2"
    sleep 5
    echo ""
    echo "---------------------------------------------------------"
    echo " Running Warmup {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}2\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}2"
    sleep 5
    for c in {interval}
    do
    echo ""
    echo "---------------------------------------------------------"
    echo " Queries: $c for {name}"
    echo " wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} \"http://{server_host}:{port}{url}$c\""
    echo "---------------------------------------------------------"
    echo ""
    wrk {headers} -d {duration} -c {max_concurrency} -t {max_threads} "http://{server_host}:{port}{url}$c"
    sleep 2
    done
    """

    # Test metadata; overridden per instance via __init__'s
    # self.__dict__.update(args).
    language = None
    platform = None
    webserver = None
    classification = None
    database = None
    approach = None
    orm = None
    framework = None
    # NOTE: 'os' here is a data attribute (target OS name); the os module
    # is still reachable inside methods, which resolve globals, not class
    # attributes.
    os = None
    database_os = None
    display_name = None
    notes = None
    versus = None

    ##########################################################################
    # Public Methods
    ##########################################################################

    def start(self, out, err):
        """Start the test using its setup module."""
        return self.setup_module.start(self.benchmarker, out, err)

    def stop(self, out, err):
        """Stop the test using its setup module."""
        return self.setup_module.stop(out, err)

    def verify_urls(self, out, err):
        """Verify each of the URLs for this test.

        Curls each configured URL and checks its return status; for every
        URL a ``<type>_url_passed`` flag is set on this object.  A URL
        that is not configured (AttributeError) counts as failed.
        """
        # JSON
        try:
            out.write("VERIFYING JSON (" + self.json_url + ") ...\n")
            out.flush()
            url = self.benchmarker.generate_url(self.json_url, self.port)
            self.__curl_url(url, out, err)
            self.json_url_passed = True
        except (AttributeError, subprocess.CalledProcessError):
            self.json_url_passed = False

        # DB
        try:
            out.write("VERIFYING DB (" + self.db_url + ") ...\n")
            out.flush()
            url = self.benchmarker.generate_url(self.db_url, self.port)
            self.__curl_url(url, out, err)
            self.db_url_passed = True
        except (AttributeError, subprocess.CalledProcessError):
            self.db_url_passed = False

        # Query (the query count is appended to the URL)
        try:
            out.write("VERIFYING Query (" + self.query_url + "2) ...\n")
            out.flush()
            url = self.benchmarker.generate_url(self.query_url + "2", self.port)
            self.__curl_url(url, out, err)
            self.query_url_passed = True
        except (AttributeError, subprocess.CalledProcessError):
            self.query_url_passed = False

        # Fortune
        try:
            out.write("VERIFYING Fortune (" + self.fortune_url + ") ...\n")
            out.flush()
            url = self.benchmarker.generate_url(self.fortune_url, self.port)
            self.__curl_url(url, out, err)
            self.fortune_url_passed = True
        except (AttributeError, subprocess.CalledProcessError):
            self.fortune_url_passed = False

        # Update (the query count is appended to the URL)
        try:
            out.write("VERIFYING Update (" + self.update_url + "2) ...\n")
            out.flush()
            url = self.benchmarker.generate_url(self.update_url + "2", self.port)
            self.__curl_url(url, out, err)
            self.update_url_passed = True
        except (AttributeError, subprocess.CalledProcessError):
            self.update_url_passed = False

        # Plaintext
        try:
            out.write("VERIFYING Plaintext (" + self.plaintext_url + ") ...\n")
            out.flush()
            url = self.benchmarker.generate_url(self.plaintext_url, self.port)
            self.__curl_url(url, out, err)
            self.plaintext_url_passed = True
        except (AttributeError, subprocess.CalledProcessError):
            self.plaintext_url_passed = False

    def contains_type(self, type):
        """Return True if this test implements the given test type
        (json, db, query, fortune, update or plaintext)."""
        try:
            # Use identity comparison with None per PEP 8 (was '!= None').
            if type == 'json' and self.json_url is not None:
                return True
            if type == 'db' and self.db_url is not None:
                return True
            if type == 'query' and self.query_url is not None:
                return True
            if type == 'fortune' and self.fortune_url is not None:
                return True
            if type == 'update' and self.update_url is not None:
                return True
            if type == 'plaintext' and self.plaintext_url is not None:
                return True
        except AttributeError:
            # URL attribute not configured for this test at all.
            pass
        return False

    def benchmark(self, out, err):
        """Run the benchmark for each test type this test implements
        (JSON/DB/Query/Fortune/Update/Plaintext), honoring the
        benchmarker's type filter, and report the parsed results."""
        # JSON
        try:
            if self.json_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "json"):
                out.write("BENCHMARKING JSON ... ")
                out.flush()
                remote_script = self.__generate_concurrency_script(self.json_url, self.port, self.accept_json)
                self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'json'), err)
                results = self.__parse_test('json')
                self.benchmarker.report_results(framework=self, test="json", results=results['results'])
                out.write("Complete\n")
                out.flush()
        except AttributeError:
            pass

        # DB
        try:
            if self.db_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "db"):
                out.write("BENCHMARKING DB ... ")
                out.flush()
                remote_script = self.__generate_concurrency_script(self.db_url, self.port, self.accept_json)
                self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'db'), err)
                results = self.__parse_test('db')
                self.benchmarker.report_results(framework=self, test="db", results=results['results'])
                out.write("Complete\n")
                # CONSISTENCY FIX: this section was the only one missing a
                # flush after "Complete\n".
                out.flush()
        except AttributeError:
            traceback.print_exc()

        # Query
        try:
            if self.query_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "query"):
                out.write("BENCHMARKING Query ... ")
                out.flush()
                remote_script = self.__generate_query_script(self.query_url, self.port, self.accept_json)
                self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'query'), err)
                results = self.__parse_test('query')
                self.benchmarker.report_results(framework=self, test="query", results=results['results'])
                out.write("Complete\n")
                out.flush()
        except AttributeError:
            traceback.print_exc()

        # Fortune
        try:
            if self.fortune_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "fortune"):
                out.write("BENCHMARKING Fortune ... ")
                out.flush()
                remote_script = self.__generate_concurrency_script(self.fortune_url, self.port, self.accept_html)
                self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'fortune'), err)
                results = self.__parse_test('fortune')
                self.benchmarker.report_results(framework=self, test="fortune", results=results['results'])
                out.write("Complete\n")
                out.flush()
        except AttributeError:
            traceback.print_exc()

        # Update
        try:
            if self.update_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "update"):
                out.write("BENCHMARKING Update ... ")
                out.flush()
                remote_script = self.__generate_query_script(self.update_url, self.port, self.accept_json)
                self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'update'), err)
                results = self.__parse_test('update')
                self.benchmarker.report_results(framework=self, test="update", results=results['results'])
                out.write("Complete\n")
                out.flush()
        except AttributeError:
            # TODO - this needs to report some logging
            traceback.print_exc()

        # Plaintext (pipelined, higher concurrency levels)
        try:
            if self.plaintext_url_passed and (self.benchmarker.type == "all" or self.benchmarker.type == "plaintext"):
                out.write("BENCHMARKING Plaintext ... ")
                out.flush()
                remote_script = self.__generate_concurrency_script(self.plaintext_url, self.port, self.accept_plaintext, wrk_command="wrk-pipeline", intervals=[256, 1024, 4096, 16384], pipeline="--pipeline 16")
                self.__run_benchmark(remote_script, self.benchmarker.output_file(self.name, 'plaintext'), err)
                results = self.__parse_test('plaintext')
                self.benchmarker.report_results(framework=self, test="plaintext", results=results['results'])
                out.write("Complete\n")
                out.flush()
        except AttributeError:
            traceback.print_exc()

    def parse_all(self):
        """Re-parse existing output files for every test type whose output
        file is present and report the results (for a given timestamp)."""
        for test_type in ('json', 'db', 'query', 'fortune', 'update',
                          'plaintext'):
            if os.path.exists(self.benchmarker.output_file(self.name, test_type)):
                results = self.__parse_test(test_type)
                self.benchmarker.report_results(framework=self,
                                                test=test_type,
                                                results=results['results'])

    ##########################################################################
    # Private Methods
    ##########################################################################

    def __parse_test(self, test_type):
        """Parse the raw wrk output file for *test_type*.

        Returns a dict with a 'results' list (one entry per measured run),
        or None when the output file cannot be read.  Primer/warmup runs
        are skipped; a "Queries:"/"Concurrency:" banner starts a new
        measured run.
        """
        try:
            results = dict()
            results['results'] = []
            with open(self.benchmarker.output_file(self.name, test_type)) as raw_data:
                is_warmup = True
                rawData = None
                for line in raw_data:
                    if "Queries:" in line or "Concurrency:" in line:
                        is_warmup = False
                        rawData = None
                        continue
                    if "Warmup" in line or "Primer" in line:
                        is_warmup = True
                        continue
                    if not is_warmup:
                        if rawData is None:
                            rawData = dict()
                            results['results'].append(rawData)
                        if "Latency" in line:
                            # NOTE(review): the class [us|ms|s|m|%] matches
                            # single characters, not alternatives; it
                            # happens to capture the wrk latency tokens.
                            m = re.findall(r"([0-9]+\.*[0-9]*[us|ms|s|m|%]+)", line)
                            if len(m) == 4:
                                rawData['latencyAvg'] = m[0]
                                rawData['latencyStdev'] = m[1]
                                rawData['latencyMax'] = m[2]
                        if "requests in" in line:
                            m = re.search(r"([0-9]+) requests in", line)
                            if m is not None:
                                rawData['totalRequests'] = int(m.group(1))
                        if "Socket errors" in line:
                            if "connect" in line:
                                m = re.search(r"connect ([0-9]+)", line)
                                rawData['connect'] = int(m.group(1))
                            if "read" in line:
                                m = re.search(r"read ([0-9]+)", line)
                                rawData['read'] = int(m.group(1))
                            if "write" in line:
                                m = re.search(r"write ([0-9]+)", line)
                                rawData['write'] = int(m.group(1))
                            if "timeout" in line:
                                m = re.search(r"timeout ([0-9]+)", line)
                                rawData['timeout'] = int(m.group(1))
                        if "Non-2xx" in line:
                            m = re.search(r"Non-2xx or 3xx responses: ([0-9]+)", line)
                            if m is not None:
                                rawData['5xx'] = int(m.group(1))
            return results
        except IOError:
            return None

    def __run_benchmark(self, script, output_file, err):
        """Run a single benchmark by piping *script* into an ssh session to
        the client machine; all benchmark output goes to *output_file*."""
        with open(output_file, 'w') as raw_file:
            p = subprocess.Popen(self.benchmarker.client_ssh_string.split(" "),
                                 stdin=subprocess.PIPE, stdout=raw_file,
                                 stderr=err)
            p.communicate(script)
            err.flush()

    def __generate_concurrency_script(self, url, port, accept_header,
                                      wrk_command="wrk", intervals=None,
                                      pipeline=""):
        """Return the bash script that benchmarks *url* at each configured
        concurrency level (used for the JSON, DB, Fortune and Plaintext
        tests)."""
        # BUG FIX: 'intervals=[]' was a mutable default argument; use None
        # and fall back to the benchmarker's configured levels.
        if not intervals:
            intervals = self.benchmarker.concurrency_levels
        headers = self.__get_request_headers(accept_header)
        return self.concurrency_template.format(max_concurrency=self.benchmarker.max_concurrency,
            max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
            interval=" ".join("{}".format(item) for item in intervals),
            server_host=self.benchmarker.server_host, port=port, url=url, headers=headers, wrk=wrk_command,
            pipeline=pipeline)

    def __generate_query_script(self, url, port, accept_header):
        """Return the bash script that benchmarks *url* at each configured
        query count (used for the Query and Update tests)."""
        headers = self.__get_request_headers(accept_header)
        return self.query_template.format(max_concurrency=self.benchmarker.max_concurrency,
            max_threads=self.benchmarker.max_threads, name=self.name, duration=self.benchmarker.duration,
            interval=" ".join("{}".format(item) for item in self.benchmarker.query_intervals),
            server_host=self.benchmarker.server_host, port=port, url=url, headers=headers)

    def __get_request_headers(self, accept_header):
        """Return the complete wrk header string for *accept_header*."""
        return self.headers_template.format(accept=accept_header)

    def __curl_url(self, url, out, err):
        """Dump the HTTP response (with headers) for *url* to *out*; raise
        subprocess.CalledProcessError on an HTTP error status."""
        # Use -i to output response with headers.
        # Don't use -f so that the HTTP response code is ignored.
        # Use --stderr - to redirect stderr to stdout so we get
        # error output for sure in stdout.
        # Use -sS to hide progress bar, but show errors.
        subprocess.check_call(["curl", "-i", "-sS", url], stderr=err, stdout=out)
        out.flush()
        err.flush()
        # HTTP output may not end in a newline, so add that here.
        out.write("\n")
        out.flush()
        # In the curl invocation above we could not use -f because
        # then the HTTP response would not be output, so use -f in
        # an additional invocation so that if there is an HTTP error,
        # subprocess.CalledProcessError will be thrown. Note that this
        # uses check_output() instead of check_call() so that we can
        # ignore the HTTP response because we already output that in
        # the first curl invocation.
        subprocess.check_output(["curl", "-fsS", url], stderr=err)
        out.flush()
        err.flush()
        # HTTP output may not end in a newline, so add that here.
        out.write("\n")
        out.flush()

    ##########################################################################
    # Constructor
    ##########################################################################

    def __init__(self, name, directory, benchmarker, args):
        """Build a test from its *name*, source *directory*, the shared
        *benchmarker* and the dict of attributes parsed from the
        benchmark config file."""
        self.name = name
        self.directory = directory
        self.benchmarker = benchmarker
        # Every key in args becomes an attribute (json_url, port,
        # setup_file, ...).
        self.__dict__.update(args)

        # Ensure the directory has an __init__.py file so that it can be
        # imported as a Python package.
        if not os.path.exists(os.path.join(directory, "__init__.py")):
            open(os.path.join(directory, "__init__.py"), 'w').close()

        # (Removed a redundant double assignment to a local here.)
        self.setup_module = importlib.import_module(directory + '.' + self.setup_file)
##########################################################################################
# Static methods
##########################################################################################
##############################################################
# parse_config(config, directory, benchmarker)
# parses a config file and returns a list of FrameworkTest
# objects based on that config file.
##############################################################
def parse_config(config, directory, benchmarker):
    """Build FrameworkTest objects from a parsed benchmark config.

    Args:
        config: dict parsed from a framework's config file; must contain
            a 'framework' name and a 'tests' list of
            {variant_name: settings} dicts.
        directory: directory containing the framework's files.
        benchmarker: the Benchmarker coordinating this run.

    Returns:
        list of FrameworkTest, one per test variant in the config.
    """
    tests = []
    # The config object can specify multiple tests; we need to loop
    # over them and parse them out.
    for test in config['tests']:
        # .items() (rather than Python-2-only .iteritems()) iterates the
        # same pairs and keeps this working under Python 2 and 3.
        for key, value in test.items():
            test_name = config['framework']
            # If the test uses the 'default' keyword, we don't append
            # anything to its name. All configs should have only one
            # default.
            if key != 'default':
                # We need to include the variant key in the test_name.
                test_name = test_name + "-" + key
            tests.append(FrameworkTest(test_name, directory, benchmarker, value))
    return tests
##############################################################
# End parse_config
##############################################################
| |
"""
===================================
Column Transformer with Mixed Types
===================================
.. currentmodule:: sklearn
This example illustrates how to apply different preprocessing and feature
extraction pipelines to different subsets of features, using
:class:`~compose.ColumnTransformer`. This is particularly handy for the
case of datasets that contain heterogeneous data types, since we may want to
scale the numeric features and one-hot encode the categorical ones.
In this example, the numeric data is standard-scaled after mean-imputation,
while the categorical data is one-hot encoded after imputing missing values
with a new category (``'missing'``).
In addition, we show two different ways to dispatch the columns to the
particular pre-processor: by column names and by column data types.
Finally, the preprocessing pipeline is integrated in a full prediction pipeline
using :class:`~pipeline.Pipeline`, together with a simple classification
model.
"""
# Author: Pedro Morales <part.morales@gmail.com>
#
# License: BSD 3 clause
import numpy as np

from sklearn.compose import ColumnTransformer
from sklearn.datasets import fetch_openml
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, GridSearchCV

np.random.seed(0)

# Load data from https://www.openml.org/d/40945
X, y = fetch_openml("titanic", version=1, as_frame=True, return_X_y=True)

# Alternatively X and y can be obtained directly from the frame attribute:
# X = titanic.frame.drop('survived', axis=1)
# y = titanic.frame['survived']

# %%
# Use ``ColumnTransformer`` by selecting column by names
###############################################################################
# We will train our classifier with the following features:
#
# Numeric Features:
#
# * ``age``: float;
# * ``fare``: float.
#
# Categorical Features:
#
# * ``embarked``: categories encoded as strings ``{'C', 'S', 'Q'}``;
# * ``sex``: categories encoded as strings ``{'female', 'male'}``;
# * ``pclass``: ordinal integers ``{1, 2, 3}``.
#
# We create the preprocessing pipelines for both numeric and categorical data.
# Note that ``pclass`` could either be treated as a categorical or numeric
# feature.
numeric_features = ['age', 'fare']
numeric_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='median')),
    ('scaler', StandardScaler())])

categorical_features = ['embarked', 'sex', 'pclass']
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))])

preprocessor = ColumnTransformer(
    transformers=[
        ('num', numeric_transformer, numeric_features),
        ('cat', categorical_transformer, categorical_features)])

# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
                      ('classifier', LogisticRegression())])

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=0)

clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test))

# %%
# HTML representation of ``Pipeline``
###############################################################################
# When the ``Pipeline`` is printed out in a jupyter notebook an HTML
# representation of the estimator is displayed as follows:
from sklearn import set_config

set_config(display='diagram')
clf

# %%
# Use ``ColumnTransformer`` by selecting column by data types
###############################################################################
# When dealing with a cleaned dataset, the preprocessing can be automatic by
# using the data types of the column to decide whether to treat a column as a
# numerical or categorical feature.
# :func:`sklearn.compose.make_column_selector` gives this possibility.
# First, let's only select a subset of columns to simplify our
# example.
subset_feature = ['embarked', 'sex', 'pclass', 'age', 'fare']
X_train, X_test = X_train[subset_feature], X_test[subset_feature]

# %%
# Then, we introspect the information regarding each column data type.
X_train.info()

# %%
# We can observe that the `embarked` and `sex` columns were tagged as
# `category` columns when loading the data with ``fetch_openml``. Therefore, we
# can use this information to dispatch the categorical columns to the
# ``categorical_transformer`` and the remaining columns to the
# ``numerical_transformer``.

# %%
# .. note:: In practice, you will have to handle yourself the column data type.
#    If you want some columns to be considered as `category`, you will have to
#    convert them into categorical columns. If you are using pandas, you can
#    refer to their documentation regarding `Categorical data
#    <https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html>`_.
from sklearn.compose import make_column_selector as selector

preprocessor = ColumnTransformer(transformers=[
    ('num', numeric_transformer, selector(dtype_exclude="category")),
    ('cat', categorical_transformer, selector(dtype_include="category"))
])
clf = Pipeline(steps=[('preprocessor', preprocessor),
                      ('classifier', LogisticRegression())])

clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test))

# %%
# The resulting score is not exactly the same as the one from the previous
# pipeline because the dtype-based selector treats the ``pclass`` columns as
# a numeric feature instead of a categorical feature as previously:
selector(dtype_exclude="category")(X_train)

# %%
selector(dtype_include="category")(X_train)

# %%
# Using the prediction pipeline in a grid search
##############################################################################
# Grid search can also be performed on the different preprocessing steps
# defined in the ``ColumnTransformer`` object, together with the classifier's
# hyperparameters as part of the ``Pipeline``.
# We will search for both the imputer strategy of the numeric preprocessing
# and the regularization parameter of the logistic regression using
# :class:`~sklearn.model_selection.GridSearchCV`.
param_grid = {
    'preprocessor__num__imputer__strategy': ['mean', 'median'],
    'classifier__C': [0.1, 1.0, 10, 100],
}

grid_search = GridSearchCV(clf, param_grid, cv=10)
grid_search

# %%
# Calling 'fit' triggers the cross-validated search for the best
# hyper-parameters combination:
#
grid_search.fit(X_train, y_train)

# The original used an f-string with no placeholder (extraneous f prefix);
# a plain string literal is equivalent.
print("Best params:")
print(grid_search.best_params_)

# %%
# The internal cross-validation scores obtained by those parameters is:
print(f"Internal CV score: {grid_search.best_score_:.3f}")

# %%
# We can also introspect the top grid search results as a pandas dataframe:
import pandas as pd

cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results = cv_results.sort_values("mean_test_score", ascending=False)
cv_results[["mean_test_score", "std_test_score",
            "param_preprocessor__num__imputer__strategy",
            "param_classifier__C"
            ]].head(5)

# %%
# The best hyper-parameters have been used to re-fit a final model on the full
# training set. We can evaluate that final model on held out test data that was
# not used for hyperparameter tuning.
#
print(("best logistic regression from grid search: %.3f"
       % grid_search.score(X_test, y_test)))
| |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import os
import logging
from streamalert.classifier.clients import SQSClient
from streamalert.shared.firehose import FirehoseClient
from streamalert.classifier.parsers import get_parser
from streamalert.classifier.payload.payload_base import StreamPayload
from streamalert.shared import config, CLASSIFIER_FUNCTION_NAME as FUNCTION_NAME
from streamalert.shared.artifact_extractor import ArtifactExtractor
from streamalert.shared.exceptions import ConfigError
from streamalert.shared.logger import get_logger
from streamalert.shared.metrics import MetricLogger
from streamalert.shared.normalize import Normalizer
LOGGER = get_logger(__name__)
LOGGER_DEBUG_ENABLED = LOGGER.isEnabledFor(logging.DEBUG)
class Classifier:
    """Classify, map source, and parse a raw record into its declared type."""

    # Cached at class level so a warm Lambda container does not reload
    # config or rebuild clients on every invocation.
    _config = None
    _firehose_client = None
    _sqs_client = None

    def __init__(self):
        # Create some objects to be cached if they have not already been created
        Classifier._config = Classifier._config or config.load_config(validate=True)
        Classifier._firehose_client = (
            Classifier._firehose_client or FirehoseClient.load_from_config(
                prefix=self.config['global']['account']['prefix'],
                firehose_config=self.config['global'].get('infrastructure', {}).get('firehose', {}),
                log_sources=self.config['logs']
            )
        )
        Classifier._sqs_client = Classifier._sqs_client or SQSClient()

        # Setup the normalization logic
        Normalizer.load_from_config(self.config)

        self._cluster = os.environ['CLUSTER']
        self._payloads = []
        self._failed_record_count = 0
        self._processed_size = 0

    @property
    def config(self):
        """Loaded StreamAlert configuration (cached at class level)."""
        return Classifier._config

    @property
    def classified_payloads(self):
        """Payload records accumulated by run()/_classify_payload()."""
        return self._payloads

    @property
    def firehose(self):
        """FirehoseClient for historical retention, or None if disabled."""
        return Classifier._firehose_client

    @property
    def data_retention_enabled(self):
        """True when a Firehose client was configured for retention."""
        return Classifier._firehose_client is not None

    @property
    def sqs(self):
        """SQSClient used to forward classified payloads downstream."""
        return Classifier._sqs_client

    def _load_logs_for_resource(self, service, resource):
        """Load the log types for this service type and resource value

        Args:
            service (str): Source service
            resource (str): Resource within the service

        Returns:
            OrderedDict: log schema config keyed by source, for each source
                whose prefix (text before ':') is declared for this resource

        Raises:
            ConfigError: if the service or the resource is not declared in
                the sources configuration
        """
        # Get all logs for the configured service/entity (s3, kinesis, or sns)
        resources = self._config['clusters'][self._cluster]['data_sources'].get(service)
        if not resources:
            error = 'Service [{}] not declared in sources configuration for resource [{}]'.format(
                service,
                resource
            )
            raise ConfigError(error)

        source_config = resources.get(resource)
        if not source_config:
            error = 'Resource [{}] not declared in sources configuration for service [{}]'.format(
                resource,
                service
            )
            raise ConfigError(error)

        # Get the log schemas for source(s); a source such as
        # "osquery:status" matches when its prefix ("osquery") is listed
        # for this resource
        return OrderedDict(
            (source, self.config['logs'][source])
            for source in self.config['logs'].keys()
            if source.split(':')[0] in source_config
        )

    @classmethod
    def _process_log_schemas(cls, payload_record, logs_config):
        """Get any log schemas that matched this log format

        If successful, this method sets the PayloadRecord.parser attribute
        to the parser that was used to parse the data.

        Args:
            payload_record: A PayloadRecord object
            logs_config: Subset of entire logs.json schemas to use for processing

        Returns:
            bool: True if the payload's data was successfully parsed, False otherwise
        """
        # Loop over all logs schemas declared for this source
        for log_type, options in logs_config.items():
            LOGGER.debug('Trying schema \'%s\' with options: %s', log_type, options)

            # Get the parser type to use for this log and set up the parser
            parser = get_parser(options['parser'])(options, log_type=log_type)
            parsed = parser.parse(payload_record.data)
            if not parsed:
                LOGGER.debug('Failed to classify data with schema: %s', log_type)
                continue

            LOGGER.debug('Log classified with schema: %s', log_type)

            # Set the parser on successful parse
            payload_record.parser = parser
            return True

        return False  # unable to parse this record

    def _classify_payload(self, payload):
        """Run the payload through the classification logic to determine the data type

        Args:
            payload (StreamPayload): StreamAlert payload object being processed
        """
        # Get logs defined for the service/entity in the config
        logs_config = self._load_logs_for_resource(payload.service(), payload.resource)
        if not logs_config:
            LOGGER.error(
                'No log types defined for resource [%s] in sources configuration for service [%s]',
                payload.resource,
                payload.service()
            )
            return

        for record in payload.pre_parse():
            # Increment the processed size using the length of this record
            self._processed_size += len(record)

            # Get the parser for this data
            self._process_log_schemas(record, logs_config)
            LOGGER.debug('Parsed and classified payload: %s', bool(record))

            # NOTE(review): this assigns the record object itself (not a
            # bool) when both operands are truthy; presumably only the
            # truthiness of fully_classified is consumed downstream — confirm
            payload.fully_classified = payload.fully_classified and record
            if not record:
                self._log_bad_records(record, 1)
                continue

            LOGGER.debug(
                'Classified %d record(s) with schema: %s',
                len(record.parsed_records),
                record.log_schema_type
            )

            # Even if the parser was successful, there's a chance it
            # could not parse all records, so log them here as invalid
            self._log_bad_records(record, len(record.invalid_records))

            for parsed_rec in record.parsed_records:
                #
                # In Normalization v1, the normalized types are defined based on log source
                # (e.g. osquery, cloudwatch etc) and this will be deprecated.
                # In Normalization v2, the normalized types are defined based on log type
                # (e.g. osquery:differential, cloudwatch:cloudtrail, cloudwatch:events etc)
                #
                Normalizer.normalize(parsed_rec, record.log_schema_type)

            self._payloads.append(record)

    def _log_bad_records(self, payload_record, invalid_record_count):
        """Log the contents of bad records to output so they can be handled

        Args:
            payload_record (PayloadRecord): PayloadRecord instance that, when logged to output,
                prints some information that will be helpful for debugging bad data
            invalid_record_count (int): Number of invalid records to increment the count by
        """
        if not invalid_record_count:
            return  # don't log anything if the count of invalid records is not > 0

        LOGGER.error('Record does not match any defined schemas: %s', payload_record)
        self._failed_record_count += invalid_record_count

    def _log_metrics(self):
        """Perform some metric logging before exiting"""
        MetricLogger.log_metric(
            FUNCTION_NAME,
            MetricLogger.TOTAL_RECORDS,
            sum(len(payload.parsed_records) for payload in self._payloads)
        )
        MetricLogger.log_metric(
            FUNCTION_NAME,
            MetricLogger.NORMALIZED_RECORDS,
            sum(
                1 for payload in self._payloads
                for log in payload.parsed_records if log.get(Normalizer.NORMALIZATION_KEY)
            )
        )
        MetricLogger.log_metric(
            FUNCTION_NAME, MetricLogger.TOTAL_PROCESSED_SIZE, self._processed_size
        )

        LOGGER.debug('Invalid record count: %d', self._failed_record_count)
        MetricLogger.log_metric(
            FUNCTION_NAME, MetricLogger.FAILED_PARSES, self._failed_record_count
        )

    def run(self, records):
        """Run classification of the records in the Lambda input

        Args:
            records (list): A list of records received by Lambda

        Returns:
            list: classified payload records, or None when `records` is empty
        """
        LOGGER.debug('Number of incoming records: %d', len(records))
        if not records:
            return

        for input_record in records:
            # Get the service and entity from the payload
            payload = StreamPayload.load_from_raw_record(input_record)
            if not payload:
                self._log_bad_records(input_record, 1)
                continue

            self._classify_payload(payload)

        self._log_metrics()

        # Send records to SQS before sending to Firehose
        self.sqs.send(self._payloads)

        # Send the data to firehose for historical retention
        if self.data_retention_enabled:
            categorized_records = self.firehose.send(self._payloads)

            # Extract artifacts if it is enabled
            if config.artifact_extractor_enabled(self._config):
                ArtifactExtractor(
                    self.firehose.artifacts_firehose_stream_name(self._config)
                ).run(categorized_records)

        return self._payloads
| |
import gzip
import http.server
import threading
import unittest
import wfdb.io._url
class TestNetFiles(unittest.TestCase):
    """
    Test accessing remote files.
    """

    def test_requests(self):
        """
        Test reading a remote file using various APIs.

        This tests that we can create a file object using
        wfdb.io._url.openurl(), and tests that the object implements
        the standard Python API functions for a file of the
        appropriate type.

        Parameters
        ----------
        N/A

        Returns
        -------
        N/A

        """
        text_data = """
        BERNARDO: Who's there?
        FRANCISCO: Nay, answer me: stand, and unfold yourself.
        BERNARDO: Long live the king!
        FRANCISCO: Bernardo?
        BERNARDO: He.
        FRANCISCO: You come most carefully upon your hour.
        BERNARDO: 'Tis now struck twelve; get thee to bed, Francisco.
        """
        binary_data = text_data.encode()
        file_content = {'/foo.txt': binary_data}

        # Test all possible combinations of:
        #  - whether or not the server supports compression
        #  - whether or not the server supports random access
        #  - chosen buffering policy (values passed straight through to
        #    openurl(); see wfdb.io._url for their meaning)
        for allow_gzip in (False, True):
            for allow_range in (False, True):
                with DummyHTTPServer(file_content=file_content,
                                     allow_gzip=allow_gzip,
                                     allow_range=allow_range) as server:
                    url = server.url('/foo.txt')
                    for buffering in (-2, -1, 0, 20):
                        self._test_text(url, text_data, buffering)
                        self._test_binary(url, binary_data, buffering)

    def _test_text(self, url, content, buffering):
        """
        Test reading a URL using text-mode file APIs.

        Parameters
        ----------
        url : str
            URL of the remote resource.
        content : str
            Expected content of the resource.
        buffering : int
            Buffering policy for openurl().

        Returns
        -------
        N/A

        """
        # read(-1), readable(), seekable()
        with wfdb.io._url.openurl(url, 'r', buffering=buffering) as tf:
            self.assertTrue(tf.readable())
            self.assertTrue(tf.seekable())
            self.assertEqual(tf.read(), content)
            # A second read at EOF must return the empty string.
            self.assertEqual(tf.read(), '')

        # read(10)
        with wfdb.io._url.openurl(url, 'r', buffering=buffering) as tf:
            result = ''
            while True:
                chunk = tf.read(10)
                result += chunk
                if len(chunk) < 10:
                    break
            self.assertEqual(result, content)

        # readline(), seek(), tell()
        with wfdb.io._url.openurl(url, 'r', buffering=buffering) as tf:
            result = ''
            while True:
                # Seek away and back between reads to exercise the
                # seek/tell implementation mid-stream.
                rpos = tf.tell()
                tf.seek(0)
                tf.seek(rpos)
                chunk = tf.readline()
                result += chunk
                if len(chunk) == 0:
                    break
            self.assertEqual(result, content)

    def _test_binary(self, url, content, buffering):
        """
        Test reading a URL using binary-mode file APIs.

        Parameters
        ----------
        url : str
            URL of the remote resource.
        content : bytes
            Expected content of the resource.
        buffering : int
            Buffering policy for openurl().

        Returns
        -------
        N/A

        """
        # read(-1), readable(), seekable()
        with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
            self.assertTrue(bf.readable())
            self.assertTrue(bf.seekable())
            self.assertEqual(bf.read(), content)
            # A second read at EOF must return empty bytes.
            self.assertEqual(bf.read(), b'')
            self.assertEqual(bf.tell(), len(content))

        # read(10)
        with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
            result = b''
            while True:
                chunk = bf.read(10)
                result += chunk
                if len(chunk) < 10:
                    break
            self.assertEqual(result, content)
            self.assertEqual(bf.tell(), len(content))

        # readline()
        with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
            result = b''
            while True:
                chunk = bf.readline()
                result += chunk
                if len(chunk) == 0:
                    break
            self.assertEqual(result, content)
            self.assertEqual(bf.tell(), len(content))

        # read1(10), seek(), tell()
        with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
            # Seek to the end to check tell() reports the full length.
            bf.seek(0, 2)
            self.assertEqual(bf.tell(), len(content))
            bf.seek(0)
            result = b''
            while True:
                rpos = bf.tell()
                bf.seek(0)
                bf.seek(rpos)
                chunk = bf.read1(10)
                result += chunk
                if len(chunk) == 0:
                    break
            self.assertEqual(result, content)
            self.assertEqual(bf.tell(), len(content))

        # readinto(bytearray(10))
        with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
            result = b''
            chunk = bytearray(10)
            while True:
                count = bf.readinto(chunk)
                result += chunk[:count]
                if count < 10:
                    break
            self.assertEqual(result, content)
            self.assertEqual(bf.tell(), len(content))

        # readinto1(bytearray(10))
        with wfdb.io._url.openurl(url, 'rb', buffering=buffering) as bf:
            result = b''
            chunk = bytearray(10)
            while True:
                count = bf.readinto1(chunk)
                result += chunk[:count]
                if count == 0:
                    break
            self.assertEqual(result, content)
            self.assertEqual(bf.tell(), len(content))
class DummyHTTPServer(http.server.HTTPServer):
    """
    HTTPServer used to simulate a web server for testing.

    Use it as a context manager: while the "with" block is active, a
    background thread accepts and handles client requests.

    Attributes
    ----------
    file_content : dict
        Maps absolute paths (such as "/foo.txt") to the bytes served
        for that path.
    allow_gzip : bool, optional
        Whether compressed responses ("Content-Encoding: gzip") are
        returned when the client sends "Accept-Encoding: gzip".
    allow_range : bool, optional
        Whether partial responses (206 Partial Content with
        "Content-Range") are returned when the client sends "Range".
    server_address : tuple (str, int), optional
        (address, port) to listen on; port 0 picks an arbitrary free
        port. Defaults to ("127.0.0.1", 0).
    """

    def __init__(self, file_content, allow_gzip=True, allow_range=True,
                 server_address=('127.0.0.1', 0)):
        super().__init__(server_address, DummyHTTPRequestHandler)
        self.allow_gzip = allow_gzip
        self.allow_range = allow_range
        self.file_content = file_content

    def url(self, path='/'):
        """
        Generate a URL that points to a file on this server.

        Parameters
        ----------
        path : str, optional
            Path of the file on the server.

        Returns
        -------
        url : str
            Absolute URL for the specified file.
        """
        port = self.server_address[1]
        relative = path.lstrip('/')
        return 'http://127.0.0.1:%d/%s' % (port, relative)

    def __enter__(self):
        super().__enter__()
        # Serve requests on a background thread for the duration of the
        # "with" block.
        worker = threading.Thread(target=self.serve_forever)
        self.thread = worker
        worker.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Stop the serve_forever() loop, then wait for the worker to exit.
        self.shutdown()
        self.thread.join()
        self.thread = None
        return super().__exit__(exc_type, exc_val, exc_tb)
class DummyHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    """
    HTTPRequestHandler used to simulate a web server for testing.
    """

    def do_HEAD(self):
        # HEAD: send status and headers only; send_head() writes no body.
        self.send_head()

    def do_GET(self):
        body = self.send_head()
        self.wfile.write(body)

    def log_message(self, message, *args):
        # Silence per-request logging during tests.
        pass

    def send_head(self):
        """Send the response status line and headers; return the body bytes.

        Applies gzip encoding and/or Range handling when the owning
        server allows them and the client requested them.
        """
        content = self.server.file_content.get(self.path)
        if content is None:
            self.send_error(404)
            return b''

        headers = {'Content-Type': 'text/plain'}
        status = 200
        if self.server.allow_gzip:
            headers['Vary'] = 'Accept-Encoding'
            if 'gzip' in self.headers.get('Accept-Encoding', ''):
                # Compress first; any Range below applies to the gzipped
                # representation, matching real servers' behavior.
                content = gzip.compress(content)
                headers['Content-Encoding'] = 'gzip'
        if self.server.allow_range:
            headers['Accept-Ranges'] = 'bytes'
            req_range = self.headers.get('Range', '')
            if req_range.startswith('bytes='):
                # Parse "bytes=start-end". HTTP's end is inclusive, so
                # convert to an exclusive Python slice bound.
                # NOTE(review): a suffix range ("bytes=-N") would raise
                # ValueError at int(start); presumably the client under
                # test never sends one — confirm.
                start, end = req_range.split('=')[1].split('-')
                start = int(start)
                if end == '':
                    end = len(content)
                else:
                    end = min(len(content), int(end) + 1)
                if start < end:
                    status = 206
                    resp_range = 'bytes %d-%d/%d' % (
                        start, end - 1, len(content))
                    content = content[start:end]
                else:
                    # Unsatisfiable range.
                    status = 416
                    resp_range = 'bytes */%d' % len(content)
                    content = b''
                headers['Content-Range'] = resp_range
        headers['Content-Length'] = len(content)
        self.send_response(status)
        for h, v in sorted(headers.items()):
            self.send_header(h, v)
        self.end_headers()
        return content
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Simple HTTP server which receives test results from the build slaves and
# stores them in a MySQL database. The test logs are also stored in an S3 bucket.
#
# Configuration here is done via environment variables:
#
# MySQL config:
# MYSQLHOST - host running mysql
# MYSQLPORT - port of mysql [optional]
# MYSQLUSER - username
# MYSQLPWD - password
# MYSQLDB - mysql database
#
# S3 config:
# AWS_ACCESS_KEY - AWS access key
# AWS_SECRET_KEY - AWS secret key
# TEST_RESULT_BUCKET - bucket to store results in (eg 'kudu-test-results')
#
# If the AWS credentials are not configured, falls back to using Boto's
# default configuration (http://boto.cloudhackers.com/en/latest/boto_config_tut.html)
#
# Installation instructions:
# You probably want to run this inside a virtualenv to avoid having
# to install python modules systemwide. For example:
# $ virtualenv ~/flaky-test-server-env/
# $ . ~/flaky-test-server-env/bin/activate
# $ pip install boto
# $ pip install jinja2
# $ pip install cherrypy
# $ pip install MySQL-python
import boto
import cherrypy
import gzip
import itertools
from jinja2 import Template
import logging
import MySQLdb
import os
import parse_test_failure
from StringIO import StringIO
import threading
import uuid
def percent_rate(num, denom):
    """Return num/denom expressed as a percentage (0-100).

    Returns 0 when denom is 0 instead of raising ZeroDivisionError.

    The explicit float() cast avoids Python 2 integer division: with int
    counts, the original `num/denom * 100` truncated every partial rate
    to 0 (e.g. 3 failures out of 8 runs reported 0 instead of 37.5).
    """
    if denom == 0:
        return 0
    return float(num) / denom * 100
class TRServer(object):
def __init__(self):
    """Initialize DB and S3 state for the result server."""
    # MySQL connections are not shared across threads; each CherryPy
    # worker thread caches its own handle in this thread-local.
    self.thread_local = threading.local()
    self.ensure_table()
    self.s3 = self.connect_s3()
    self.s3_bucket = self.s3.get_bucket(os.environ["TEST_RESULT_BUCKET"])
def connect_s3(self):
    """Create and return a boto S3 connection using the AWS_ACCESS_KEY /
    AWS_SECRET_KEY environment variables (boto's own config is used as a
    fallback when they are unset)."""
    key_id = os.environ.get("AWS_ACCESS_KEY")
    secret = os.environ.get("AWS_SECRET_KEY")
    conn = boto.connect_s3(key_id, secret)
    logging.info("Connected to S3 with access key %s" % key_id)
    return conn
def upload_to_s3(self, key, fp, filename):
    """Store the contents of file object `fp` in the S3 bucket under `key`,
    advertising `filename` via Content-Disposition so browsers save the
    download under a sensible name."""
    s3_key = boto.s3.key.Key(self.s3_bucket)
    s3_key.key = key
    # The Content-Disposition header sets the filename that the browser
    # will use to download this. Cast to str() because boto escapes the
    # header incorrectly when handed a unicode string.
    disposition = str('inline; filename=%s' % filename)
    s3_key.set_metadata('Content-Disposition', disposition)
    s3_key.set_contents_from_string(fp.read(),
                                    reduced_redundancy=True)
def connect_mysql(self):
    """Return this thread's cached MySQL connection, creating it on first
    use from the MYSQL* environment variables."""
    cached = getattr(self.thread_local, "db", None)
    if cached is not None:
        return cached

    host = os.environ["MYSQLHOST"]
    port = int(os.environ.get("MYSQLPORT", "3306"))
    user = os.environ["MYSQLUSER"]
    pwd = os.environ["MYSQLPWD"]
    db = os.environ["MYSQLDB"]

    conn = MySQLdb.connect(host, user, pwd, db, port=port)
    self.thread_local.db = conn
    conn.autocommit(True)
    logging.info("Connected to MySQL at %s" % host)
    return conn
def execute_query(self, query, *args):
    """Execute a query, automatically reconnecting on disconnection.

    Args:
        query: SQL text, optionally with %(name)s placeholders.
        *args: parameter mapping/sequence passed through to cursor.execute().

    Returns:
        The DictCursor on which the query was executed (rows pending).

    Raises:
        MySQLdb.OperationalError: re-raised after the reconnect budget is
            exhausted, or for any error other than "server gone away".
    """
    # We'll try up to 3 times to reconnect
    MAX_ATTEMPTS = 3

    # Error code for the "MySQL server has gone away" error.
    MYSQL_SERVER_GONE_AWAY = 2006

    attempt_num = 0
    while True:
        c = self.connect_mysql().cursor(MySQLdb.cursors.DictCursor)
        attempt_num = attempt_num + 1
        try:
            c.execute(query, *args)
            return c
        except MySQLdb.OperationalError as err:
            if err.args[0] == MYSQL_SERVER_GONE_AWAY and attempt_num < MAX_ATTEMPTS:
                logging.warn("Forcing reconnect to MySQL: %s" % err)
                # Drop the cached thread-local handle so connect_mysql()
                # builds a fresh connection on the next loop iteration.
                self.thread_local.db = None
                continue
            else:
                raise
def ensure_table(self):
    """Create the test_results table if it does not already exist."""
    c = self.execute_query("""
      CREATE TABLE IF NOT EXISTS test_results (
        id int not null auto_increment primary key,
        timestamp timestamp not null default current_timestamp,
        build_id varchar(100),
        revision varchar(50),
        build_config varchar(100),
        hostname varchar(255),
        test_name varchar(100),
        status int,
        log_key char(40),
        INDEX (test_name, timestamp),
        INDEX (timestamp)
      );""")
@cherrypy.expose
def index(self):
    """Landing page; confirms the server is running."""
    return "Welcome to the test result server!"
@cherrypy.expose
def add_result(self, **kwargs):
    """Record a test result reported by a build slave.

    Expects build_id, revision, build_config, hostname, test_name and
    status fields; an optional 'log' file upload is stored in S3 under a
    freshly generated UUID key, recorded in the row as log_key.
    """
    args = {}
    args.update(kwargs)

    # Only upload the log if it's provided.
    if 'log' in kwargs:
        log = kwargs['log']
        s3_id = uuid.uuid1()
        self.upload_to_s3(s3_id, log.file, log.filename)
    else:
        s3_id = None
    args['log_key'] = s3_id

    logging.info("Handling report: %s" % repr(args))
    # Parameterized INSERT: values are escaped by the MySQL driver, so
    # report fields cannot inject SQL.
    self.execute_query(
        "INSERT INTO test_results(build_id, revision, build_config, hostname, test_name, status, log_key) "
        "VALUES (%(build_id)s, %(revision)s, %(build_config)s, %(hostname)s, %(test_name)s,"
        "%(status)s, %(log_key)s)",
        args)
    return "Success!\n"
@cherrypy.expose
def download_log(self, key):
    """Redirect the client to a signed, time-limited S3 URL for the log."""
    expiry = 60 * 60 * 24 # link should last 1 day
    k = boto.s3.key.Key(self.s3_bucket)
    k.key = key
    raise cherrypy.HTTPRedirect(k.generate_url(expiry))
@cherrypy.expose
def diagnose(self, key):
    """Fetch a gzipped log from S3 and render a failure-diagnosis page."""
    k = boto.s3.key.Key(self.s3_bucket)
    k.key = key
    log_text_gz = k.get_contents_as_string()
    # `encoded_text` holds the decompressed (still byte-encoded) log body.
    encoded_text = gzip.GzipFile(fileobj=StringIO(log_text_gz)).read()
    # Ignore errors in decoding, as logs may contain binary data.
    log_text = encoded_text.decode('utf-8', 'ignore')
    summary = parse_test_failure.extract_failure_summary(log_text)
    if not summary:
        summary = "Unable to diagnose"
    template = Template("""
      <h1>Diagnosed failure</h1>
      <code><pre>{{ summary|e }}</pre></code>
      <h1>Full log</h1>
      <code><pre>{{ log_text|e }}</pre></code>
    """)
    return self.render_container(template.render(summary=summary, log_text=log_text))
def recently_failed_html(self):
    """ Return an HTML report of recently failed tests """
    c = self.execute_query(
        "SELECT * from test_results WHERE status != 0 "
        "AND timestamp > NOW() - INTERVAL 1 WEEK "
        "ORDER BY timestamp DESC LIMIT 50")
    failed_tests = c.fetchall()

    # Flag the first row of each calendar day so the template can emit
    # a date header between days.
    prev_date = None
    for t in failed_tests:
        t['is_new_date'] = t['timestamp'].date() != prev_date
        prev_date = t['timestamp'].date()

    template = Template("""
    <h1>50 most recent failures</h1>
    <table class="table">
      <tr>
       <th>test</th>
       <th>config</th>
       <th>exit code</th>
       <th>rev</th>
       <th>machine</th>
       <th>time</th>
       <th>build</th>
      </tr>
    {% for run in failed_tests %}
      {% if run.is_new_date %}
        <tr class="new-date">
          <th colspan="7">{{ run.timestamp.date()|e }}</th>
        </tr>
      {% endif %}
      <tr>
        <td><a href="/test_drilldown?test_name={{ run.test_name |urlencode }}">
            {{ run.test_name |e }}
          </a></td>
        <td>{{ run.build_config |e }}</td>
        <td>{{ run.status |e }}
          {% if run.log_key %}
            <a href="/download_log?key={{ run.log_key |urlencode }}">failure log</a> |
            <a href="/diagnose?key={{ run.log_key |urlencode }}">diagnose</a>
          {% endif %}
        </td>
        <td>{{ run.revision |e }}</td>
        <td>{{ run.hostname |e }}</td>
        <td>{{ run.timestamp |e }}</td>
        <td>{{ run.build_id |e }}</td>
      </tr>
    {% endfor %}
    </table>
    """)
    return template.render(failed_tests=failed_tests)
def flaky_report_html(self):
    """ Return an HTML report of recently flaky tests """
    # Per-test, per-day failure counts over the trailing week.  days_ago is
    # the whole number of days between the run timestamp and now.  Only
    # (test, day) groups with at least one failure are returned.
    c = self.execute_query(
        """SELECT
          test_name,
          DATEDIFF(NOW(), timestamp) AS days_ago,
          SUM(IF(status != 0, 1, 0)) AS num_failures,
          COUNT(*) AS num_runs
        FROM test_results
        WHERE timestamp > NOW() - INTERVAL 1 WEEK
        GROUP BY test_name, days_ago
        HAVING num_failures > 0
        ORDER BY test_name""")
    rows = c.fetchall()
    results = []
    # groupby requires its input sorted by the grouping key; the query's
    # ORDER BY test_name guarantees that here.
    for test_name, test_rows in itertools.groupby(rows, lambda r: r['test_name']):
        # Convert to list so we can consume it multiple times
        test_rows = list(test_rows)
        # Compute summary for last 7 days and last 2 days
        runs_7day = sum(r['num_runs'] for r in test_rows)
        failures_7day = sum(r['num_failures'] for r in test_rows)
        runs_2day = sum(r['num_runs'] for r in test_rows if r['days_ago'] < 2)
        failures_2day = sum(r['num_failures'] for r in test_rows if r['days_ago'] < 2)
        # Compute a sparkline (percentage failure for each day).  Index 7 is
        # today; days with no failing group stay at 0.
        sparkline = [0 for x in xrange(8)]
        for r in test_rows:
            if r['num_runs'] > 0:
                percent = float(r['num_failures']) / r['num_runs'] * 100
            else:
                percent = 0
            sparkline[7 - r['days_ago']] = percent
        # Add this test's summary row for the table below.
        results.append(dict(test_name=test_name,
                            runs_7day=runs_7day,
                            failures_7day=failures_7day,
                            rate_7day=percent_rate(failures_7day, runs_7day),
                            runs_2day=runs_2day,
                            failures_2day=failures_2day,
                            rate_2day=percent_rate(failures_2day, runs_2day),
                            sparkline=",".join("%.2f" % p for p in sparkline)))
    # The inline script wires up jquery-sparkline for the trend column and
    # DataTables for column sorting.
    return Template("""
<h1>Flaky rate over last week</h1>
<table class="table" id="flaky-rate">
<thead>
<tr>
<th data-order-sequence='["asc"]'>test</th>
<th data-order-sequence='["desc"]'>failure rate (7-day)</th>
<th data-order-sequence='["desc"]'>failure rate (2-day)</th>
<th data-orderable="false">trend</th>
</tr>
</thead>
{% for r in results %}
<tr>
<td><a href="/test_drilldown?test_name={{ r.test_name |urlencode }}">
{{ r.test_name |e }}
</a></td>
<td data-order="{{ r.rate_7day }}">
{{ r.failures_7day |e }} / {{ r.runs_7day }}
({{ "%.2f"|format(r.rate_7day) }}%)
</td>
<td data-order="{{ r.rate_2day }}">
{{ r.failures_2day |e }} / {{ r.runs_2day }}
{% if r.runs_2day > 0 %}
({{ "%.2f"|format(r.rate_2day) }}%)
{% endif %}
</td>
<td><span class="inlinesparkline">{{ r.sparkline |e }}</span></td>
</tr>
{% endfor %}
</table>
<script type="text/javascript">
$(function() {
$('.inlinesparkline').sparkline('html', {
'height': 25,
'width': '40px',
'chartRangeMin': 0,
'tooltipFormatter': function(sparkline, options, fields) {
return String(7 - fields.x) + "d ago: " + fields.y + "%"; }
});
$('#flaky-rate').DataTable({ paging: false, searching: false, info: false });
});
</script>
""").render(results=results)
@cherrypy.expose
def list_failed_tests(self, build_pattern, num_days):
    """Plain-text list of distinct test names with recent failures.

    'build_pattern' is a SQL LIKE pattern matched against build_id and
    'num_days' bounds how far back to look.
    """
    cherrypy.response.headers['Content-Type'] = 'text/plain'
    cursor = self.execute_query(
        """SELECT DISTINCT
        test_name
        FROM test_results
        WHERE timestamp > NOW() - INTERVAL %(num_days)s DAY
        AND status != 0
        AND build_id LIKE %(build_pattern)s""",
        dict(build_pattern=build_pattern,
             num_days=int(num_days)))
    names = [row['test_name'] for row in cursor.fetchall()]
    return "\n".join(names)
@cherrypy.expose
def test_drilldown(self, test_name):
    """Render a per-revision flakiness breakdown for a single test."""
    # Get summary statistics for the test, grouped by revision
    c = self.execute_query(
        """SELECT
          revision,
          MIN(timestamp) AS first_run,
          SUM(IF(status != 0, 1, 0)) AS num_failures,
          COUNT(*) AS num_runs
        FROM test_results
        WHERE timestamp > NOW() - INTERVAL 1 WEEK
        AND test_name = %(test_name)s
        GROUP BY revision
        ORDER BY first_run DESC""",
        dict(test_name=test_name))
    revision_rows = c.fetchall()
    # Convert to a dictionary, by revision
    rev_dict = dict( [(r['revision'], r) for r in revision_rows] )
    # Add an empty 'runs' array to each revision to be filled in below
    for r in revision_rows:
        r['runs'] = []
    # Append the specific info on failures.  Reuses the same cursor; every
    # failing run's revision is guaranteed to be in rev_dict because both
    # queries use the same time window and test-name filter.
    c.execute("SELECT * from test_results "
              "WHERE timestamp > NOW() - INTERVAL 1 WEEK "
              "AND test_name = %(test_name)s "
              "AND status != 0",
              dict(test_name=test_name))
    for failure in c.fetchall():
        rev_dict[failure['revision']]['runs'].append(failure)
    return self.render_container(Template("""
<h1>{{ test_name |e }} flakiness over recent revisions</h1>
{% for r in revision_rows %}
<h4>{{ r.revision }} (Failed {{ r.num_failures }} / {{ r.num_runs }})</h4>
{% if r.num_failures > 0 %}
<table class="table">
<tr>
<th>time</th>
<th>config</th>
<th>exit code</th>
<th>machine</th>
<th>build</th>
</tr>
{% for run in r.runs %}
<tr {% if run.status != 0 %}
style="background-color: #faa;"
{% else %}
style="background-color: #afa;"
{% endif %}>
<td>{{ run.timestamp |e }}</td>
<td>{{ run.build_config |e }}</td>
<td>{{ run.status |e }}
{% if run.log_key %}
<a href="/download_log?key={{ run.log_key |urlencode }}">failure log</a> |
<a href="/diagnose?key={{ run.log_key |urlencode }}">diagnose</a>
{% endif %}
</td>
<td>{{ run.hostname |e }}</td>
<td>{{ run.build_id |e }}</td>
</tr>
{% endfor %}
</table>
{% endif %}
{% endfor %}
""").render(revision_rows=revision_rows, test_name=test_name))
@cherrypy.expose
def index(self):
    """Front page: flaky-rate report followed by the recent-failure list."""
    sections = [self.flaky_report_html(),
                self.recently_failed_html()]
    return self.render_container("<hr/>".join(sections))
def render_container(self, body):
    """ Render the "body" HTML inside of a bootstrap container page. """
    # Pulls Bootstrap/DataTables CSS and jQuery/Bootstrap/sparkline/
    # DataTables JS from CDNs.  Note {{ body }} is intentionally NOT
    # escaped: callers pass pre-rendered HTML fragments.
    template = Template("""
<!DOCTYPE html>
<html>
<head><title>Kudu test results</title>
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css" />
<link rel="stylesheet" type="text/css" href="//cdn.datatables.net/1.10.12/css/jquery.dataTables.css" />
<style>
.new-date { border-bottom: 2px solid #666; }
#flaky-rate tr :nth-child(1) { width: 70%; }
/* make sparkline data not show up before loading */
.inlinesparkline { color: #fff; }
/* fix sparkline tooltips */
.jqstooltip {
-webkit-box-sizing: content-box;
-moz-box-sizing: content-box;
box-sizing: content-box;
}
</style>
</head>
<body>
<script src="//ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script src="//maxcdn.bootstrapcdn.com/bootstrap/3.2.0/js/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery-sparklines/2.1.2/jquery.sparkline.min.js"></script>
<script src="//cdn.datatables.net/1.10.12/js/jquery.dataTables.js"></script>
<div class="container-fluid">
{{ body }}
</div>
</body>
</html>
""")
    return template.render(body=body)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Bind to all interfaces so the dashboard is reachable from other hosts.
    cherrypy.config.update(
        {'server.socket_host': '0.0.0.0'} )
    cherrypy.quickstart(TRServer())
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variables.
See the [Variables](https://www.tensorflow.org/guide/variables) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
                shared_name=""):
  """Deprecated. Use variable_op_v2 instead.

  Creates a (ref-typed) variable op holding a tensor of the given shape
  and dtype.

  Args:
    shape: The shape of the tensor managed by this variable.
    dtype: The underlying type of the tensor values.
    name: optional name to use for the variable op.
    set_shape: If False, the variable op is created with an unknown static
      shape instead of `shape` (and no static shape is set on the result).
    container: An optional string. Defaults to "".
      If non-empty, this variable is placed in the given container.
      Otherwise, a default container is used.
    shared_name: An optional string. Defaults to "".
      If non-empty, this variable is named in the given bucket
      with this shared_name. Otherwise, the node name is used instead.

  Returns:
    A variable tensor.
  """
  if not set_shape:
    shape = tensor_shape.unknown_shape()
  ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name,
                               container=container, shared_name=shared_name)
  # TODO(mrry): Move this to where it is used, so we can get rid of this op
  # wrapper?
  if set_shape:
    ret.set_shape(shape)
  return ret
def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""):
  """Create a variable Operation.

  See also variables.Variable.

  Args:
    shape: The shape of the tensor managed by this variable.
    dtype: The underlying type of the tensor values.
    name: optional name to use for the variable op.
    container: An optional string. Defaults to "".  If non-empty, this
      variable is placed in the given container; otherwise a default
      container is used.
    shared_name: An optional string. Defaults to "".  If non-empty, this
      variable is named in the given bucket with this shared_name;
      otherwise the node name is used instead.

  Returns:
    A variable tensor.
  """
  op_kwargs = dict(shape=shape,
                   dtype=dtype,
                   name=name,
                   container=container,
                   shared_name=shared_name)
  return gen_state_ops.variable_v2(**op_kwargs)
def init_variable(v, init, name="init"):
  """Initializes variable v from `init`.

  If `init` is callable it is invoked as init(shape, dtype) to produce the
  initial value; otherwise `init` itself (a Tensor or anything convertible
  to one, e.g. an ndarray) is assigned.

  Args:
    v: Variable to initialize.
    init: Tensor to assign to v, or an object convertible to Tensor, or an
      Initializer callable that generates a tensor given v's shape/dtype.
    name: Optional name for the op.

  Returns:
    The operation that initializes v.
  """
  with ops.name_scope(None, v.op.name + "/", [v, init]):
    with ops.name_scope(name) as scope:
      with ops.colocate_with(v):
        if callable(init):
          # Initializer callables need a concrete shape to generate from.
          assert v.get_shape().is_fully_defined(), "Variable shape unknown."
          # TODO(mrry): Convert to v.shape when the property and
          # accessor are reconciled (and all initializers support
          # tf.TensorShape objects).
          generated = init(v.get_shape().as_list(), v.dtype.base_dtype)
          value = ops.convert_to_tensor(generated, name="value")
        else:
          value = ops.convert_to_tensor(init, name="init")
        return gen_state_ops.assign(v, value, name=scope)
def is_variable_initialized(ref, name=None):
  """Checks whether a tensor has been initialized.

  Outputs a boolean scalar indicating whether the tensor has been
  initialized.

  Args:
    ref: A mutable `Tensor`.  Should be from a `Variable` node; may be
      uninitialized.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  if not ref.dtype._is_ref_dtype:
    # Resource variables answer this question themselves.
    return ref.is_initialized(name=name)
  return gen_state_ops.is_variable_initialized(ref=ref, name=name)
@tf_export(v1=["assign_sub"])
def assign_sub(ref, value, use_locking=None, name=None):
  """Update 'ref' by subtracting 'value' from it.

  Outputs "ref" after the update is done, which makes it easier to chain
  operations that need to use the reset value.

  Args:
    ref: A mutable `Tensor` of a numeric type (`float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`).  Should be from
      a `Variable` node.
    value: A `Tensor` of the same type as `ref`; the value to subtract.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      subtraction is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    Same as "ref", as a convenience for operations that want to use the
    new value after the variable has been updated.
  """
  if not ref.dtype._is_ref_dtype:
    # Resource variables implement the update themselves; use_locking and
    # name are not forwarded on this path.
    return ref.assign_sub(value)
  return gen_state_ops.assign_sub(
      ref, value, use_locking=use_locking, name=name)
@tf_export(v1=["assign_add"])
def assign_add(ref, value, use_locking=None, name=None):
  """Update 'ref' by adding 'value' to it.

  Outputs "ref" after the update is done, which makes it easier to chain
  operations that need to use the reset value.

  Args:
    ref: A mutable `Tensor` of a numeric type (`float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`).  Should be from
      a `Variable` node.
    value: A `Tensor` of the same type as `ref`; the value to add.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      addition is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    Same as "ref", as a convenience for operations that want to use the
    new value after the variable has been updated.
  """
  if not ref.dtype._is_ref_dtype:
    # Resource variables implement the update themselves; use_locking and
    # name are not forwarded on this path.
    return ref.assign_add(value)
  return gen_state_ops.assign_add(
      ref, value, use_locking=use_locking, name=name)
@tf_export(v1=["assign"])
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
  """Update 'ref' by assigning 'value' to it.

  Outputs a Tensor that holds the new value of 'ref' after the value has
  been assigned, which makes it easier to chain operations that need to
  use the reset value.

  Args:
    ref: A mutable `Tensor`.  Should be from a `Variable` node; may be
      uninitialized.
    value: A `Tensor` of the same type as `ref`; the value to assign.
    validate_shape: An optional `bool`. Defaults to `True`.  If true, the
      operation validates that the shape of 'value' matches the shape of
      the Tensor being assigned to; if false, 'ref' takes on the shape of
      'value'.
    use_locking: An optional `bool`. Defaults to `True`.  If True, the
      assignment is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that will hold the new value of 'ref' after the assignment
    has completed.
  """
  if not ref.dtype._is_ref_dtype:
    # Resource variables assign through the variable object; note that
    # validate_shape and use_locking are not forwarded on this path.
    return ref.assign(value, name=name)
  return gen_state_ops.assign(
      ref, value, use_locking=use_locking, name=name,
      validate_shape=validate_shape)
@tf_export(v1=["count_up_to"])
@deprecated(None, "Prefer Dataset.range instead.")
def count_up_to(ref, limit, name=None):
  r"""Increments 'ref' until it reaches 'limit'.

  Args:
    ref: A Variable of type `int32` or `int64`.  Should be from a scalar
      `Variable` node.
    limit: An `int`.  If incrementing ref would bring it above limit,
      instead generates an 'OutOfRange' error.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `ref`: a copy of the input before
    the increment.  If nothing else modifies the input, the values
    produced will all be distinct.
  """
  if not ref.dtype._is_ref_dtype:
    # Resource variables use the resource kernel, keyed by handle.
    return gen_state_ops.resource_count_up_to(
        ref.handle, limit, T=ref.dtype, name=name)
  return gen_state_ops.count_up_to(ref, limit=limit, name=name)
@tf_export(v1=["scatter_update"])
def scatter_update(ref, indices, updates, use_locking=True, name=None):
  r"""Applies sparse updates to a variable reference.

  Computes, for scalar / vector / high-rank indices respectively:

  ```python
  ref[indices, ...] = updates[...]
  ref[indices[i], ...] = updates[i, ...]
  ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
  ```

  Outputs `ref` after the update is done, which makes it easier to chain
  operations that need to use the reset value.  If a value in `ref` is
  updated more than once because of duplicate entries in `indices`, the
  order in which the updates happen is undefined.

  Requires `updates.shape = indices.shape + ref.shape[1:]`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
  </div>

  Args:
    ref: A `Variable`.
    indices: A `Tensor` of type `int32` or `int64`; indices into the
      first dimension of `ref`.
    updates: A `Tensor` of the same type as `ref`; values to store.
    use_locking: An optional `bool`. Defaults to `True`.  If True, the
      assignment is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    Same as `ref`, as a convenience for operations that want to use the
    updated values after the update is done.
  """
  if not ref.dtype._is_ref_dtype:
    new_values = ops.convert_to_tensor(updates, ref.dtype)
    scatter_op = gen_resource_variable_ops.resource_scatter_update(
        ref.handle, indices, new_values, name=name)
    return ref._lazy_read(scatter_op)  # pylint: disable=protected-access
  return gen_state_ops.scatter_update(ref, indices, updates,
                                      use_locking=use_locking, name=name)
@tf_export(v1=["scatter_nd_update"])
def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
  r"""Applies sparse `updates` to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
  ```

  For example, say we want to update 4 scattered elements to a rank-1 tensor
  to 8 elements. In Python, that update would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1] ,[7]])
  updates = tf.constant([9, 10, 11, 12])
  update = tf.scatter_nd_update(ref, indices, updates)
  with tf.Session() as sess:
    print sess.run(update)
  ```

  The resulting update to ref would look like this:

      [1, 11, 3, 10, 9, 6, 7, 12]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A Variable.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to store in ref.
    use_locking: An optional `bool`. Defaults to `True`.
      If True, the assignment will be protected by a lock; otherwise the
      behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    The value of the variable after the update.
  """
  if ref.dtype._is_ref_dtype:
    return gen_state_ops.scatter_nd_update(
        ref, indices, updates, use_locking, name)
  return ref._lazy_read(gen_state_ops.resource_scatter_nd_update(  # pylint: disable=protected-access
      ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
      name=name))
@tf_export(v1=["scatter_add"])
def scatter_add(ref, indices, updates, use_locking=False, name=None):
  r"""Adds sparse updates to the variable referenced by `resource`.

  Computes, for scalar / vector / high-rank indices respectively:

  ```python
  ref[indices, ...] += updates[...]
  ref[indices[i], ...] += updates[i, ...]
  ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
  ```

  Outputs `ref` after the update is done, which makes it easier to chain
  operations that need to use the updated value.  Duplicate entries are
  handled correctly: if multiple `indices` reference the same location,
  their contributions add.

  Requires `updates.shape = indices.shape + ref.shape[1:]`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
  </div>

  Args:
    ref: A `Variable`.
    indices: A `Tensor` of type `int32` or `int64`; indices into the
      first dimension of `ref`.
    updates: A `Tensor` of the same type as `ref`; values to add.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      assignment is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    Same as `ref`, as a convenience for operations that want to use the
    updated values after the update is done.
  """
  if not ref.dtype._is_ref_dtype:
    delta = ops.convert_to_tensor(updates, ref.dtype)
    scatter_op = gen_resource_variable_ops.resource_scatter_add(
        ref.handle, indices, delta, name=name)
    return ref._lazy_read(scatter_op)  # pylint: disable=protected-access
  return gen_state_ops.scatter_add(ref, indices, updates,
                                   use_locking=use_locking, name=name)
@tf_export(v1=["scatter_nd_add"])
def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
  r"""Applies sparse addition to individual values or slices in a Variable.

  With `ref` of rank `P` and `indices` of rank `Q`, `indices` must be an
  integer tensor of shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
  The innermost dimension (length `K`) indexes elements (if `K = P`) or
  slices (if `K < P`) along the `K`th dimension of `ref`, and `updates`
  must have rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
  ```

  For example, say we want to add 4 scattered elements to a rank-1 tensor to
  8 elements. In Python, that addition would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1], [7]])
  updates = tf.constant([9, 10, 11, 12])
  add = tf.scatter_nd_add(ref, indices, updates)
  with tf.Session() as sess:
    print sess.run(add)
  ```

  The resulting update to ref would look like this:

      [1, 13, 3, 14, 14, 6, 7, 20]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A mutable numeric `Tensor` (e.g. `float32`, `int32`, `half`,
      `complex64`, quantized types).  Should be from a Variable node.
    indices: A `Tensor` of type `int32` or `int64`; indices into ref.
    updates: A `Tensor` of the same type as `ref`; values to add to ref.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      assignment is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  if not ref.dtype._is_ref_dtype:
    delta = ops.convert_to_tensor(updates, ref.dtype)
    scatter_op = gen_state_ops.resource_scatter_nd_add(
        ref.handle, indices, delta, name=name)
    return ref._lazy_read(scatter_op)  # pylint: disable=protected-access
  return gen_state_ops.scatter_nd_add(
      ref, indices, updates, use_locking, name)
@tf_export(v1=["scatter_sub"])
def scatter_sub(ref, indices, updates, use_locking=False, name=None):
  r"""Subtracts sparse updates to a variable reference.

  Computes, for scalar / vector / high-rank indices respectively:

  ```python
  ref[indices, ...] -= updates[...]
  ref[indices[i], ...] -= updates[i, ...]
  ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
  ```

  Outputs `ref` after the update is done, which makes it easier to chain
  operations that need to use the reset value.  Duplicate entries are
  handled correctly: if multiple `indices` reference the same location,
  their (negated) contributions add.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%"
  src="https://www.tensorflow.org/images/ScatterSub.png" alt>
  </div>

  Args:
    ref: A mutable numeric `Tensor` (e.g. `float32`, `int32`, `half`,
      `complex64`, quantized types).  Should be from a `Variable` node.
    indices: A `Tensor` of type `int32` or `int64`; indices into the
      first dimension of `ref`.
    updates: A `Tensor` of the same type as `ref`; values to subtract.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      subtraction is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  if not ref.dtype._is_ref_dtype:
    delta = ops.convert_to_tensor(updates, ref.dtype)
    scatter_op = gen_resource_variable_ops.resource_scatter_sub(
        ref.handle, indices, delta, name=name)
    return ref._lazy_read(scatter_op)  # pylint: disable=protected-access
  return gen_state_ops.scatter_sub(ref, indices, updates,
                                   use_locking=use_locking, name=name)
@tf_export(v1=["scatter_nd_sub"])
def scatter_nd_sub(ref, indices, updates, use_locking=False, name=None):
  r"""Applies sparse subtraction to individual values or slices in a Variable.

  `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.

  `indices` must be integer tensor, containing indices into `ref`.
  It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.

  The innermost dimension of `indices` (with length `K`) corresponds to
  indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
  dimension of `ref`.

  `updates` is `Tensor` of rank `Q-1+P-K` with shape:

  ```
  [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]
  ```

  For example, say we want to subtract 4 scattered elements from a rank-1
  tensor with 8 elements. In Python, that update would look like this:

  ```python
  ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
  indices = tf.constant([[4], [3], [1] ,[7]])
  updates = tf.constant([9, 10, 11, 12])
  op = tf.scatter_nd_sub(ref, indices, updates)
  with tf.Session() as sess:
    print sess.run(op)
  ```

  The resulting update to ref would look like this:

      [1, -9, 3, -6, -4, 6, 7, -4]

  See `tf.scatter_nd` for more details about how to make updates to
  slices.

  Args:
    ref: A mutable `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
      `uint32`, `uint64`. A mutable Tensor. Should be from a Variable node.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor of indices into ref.
    updates: A `Tensor`. Must have the same type as `ref`.
      A tensor of updated values to subtract from ref.
    use_locking: An optional `bool`. Defaults to `False`.
      If True, the assignment will be protected by a lock; otherwise the
      behavior is undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  if ref.dtype._is_ref_dtype:
    return gen_state_ops.scatter_nd_sub(
        ref, indices, updates, use_locking, name)
  return ref._lazy_read(gen_state_ops.resource_scatter_nd_sub(  # pylint: disable=protected-access
      ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
      name=name))
@tf_export(v1=["scatter_mul"])
def scatter_mul(ref, indices, updates, use_locking=False, name=None):
  r"""Multiplies sparse updates into a variable reference.

  Computes, for scalar / vector / high-rank indices respectively:

  ```python
  ref[indices, ...] *= updates[...]
  ref[indices[i], ...] *= updates[i, ...]
  ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
  ```

  Outputs `ref` after the update is done, which makes it easier to chain
  operations that need to use the reset value.  Duplicate entries are
  handled correctly: if multiple `indices` reference the same location,
  their contributions multiply.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  Args:
    ref: A mutable numeric `Tensor` (e.g. `float32`, `int32`, `half`,
      `complex64`, quantized types).  Should be from a `Variable` node.
    indices: A `Tensor` of type `int32` or `int64`; indices into the
      first dimension of `ref`.
    updates: A `Tensor` of the same type as `ref`; values to multiply
      into `ref`.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      operation is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  return gen_state_ops.scatter_mul(ref=ref,
                                   indices=indices,
                                   updates=updates,
                                   use_locking=use_locking,
                                   name=name)
@tf_export(v1=["scatter_div"])
def scatter_div(ref, indices, updates, use_locking=False, name=None):
  r"""Divides a variable reference by sparse updates.

  Computes, for scalar / vector / high-rank indices respectively:

  ```python
  ref[indices, ...] /= updates[...]
  ref[indices[i], ...] /= updates[i, ...]
  ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
  ```

  Outputs `ref` after the update is done, which makes it easier to chain
  operations that need to use the reset value.  Duplicate entries are
  handled correctly: if multiple `indices` reference the same location,
  their contributions divide.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  Args:
    ref: A mutable numeric `Tensor` (e.g. `float32`, `int32`, `half`,
      `complex64`, quantized types).  Should be from a `Variable` node.
    indices: A `Tensor` of type `int32` or `int64`; indices into the
      first dimension of `ref`.
    updates: A `Tensor` of the same type as `ref`; values that `ref` is
      divided by.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      operation is protected by a lock; otherwise the behavior is
      undefined, but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  return gen_state_ops.scatter_div(ref=ref,
                                   indices=indices,
                                   updates=updates,
                                   use_locking=use_locking,
                                   name=name)
@tf_export(v1=["scatter_max"])
def scatter_max(ref, indices, updates, use_locking=False, name=None):
  r"""Reduces sparse updates into a variable reference using the `max` operation.

  Computes, for scalar / vector / high-rank indices respectively:

      ref[indices, ...] = max(ref[indices, ...], updates[...])
      ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
      ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...],
                                         updates[i, ..., j, ...])

  Outputs `ref` after the update is done, which makes it easier to chain
  operations that need to use the reset value.  Duplicate entries are
  handled correctly: if multiple `indices` reference the same location,
  their contributions combine.

  Requires `updates.shape = indices.shape + ref.shape[1:]` or
  `updates.shape = []`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png"
  alt>
  </div>

  Args:
    ref: A mutable `Tensor` of type `half`, `bfloat16`, `float32`,
      `float64`, `int32`, or `int64`.  Should be from a `Variable` node.
    indices: A `Tensor` of type `int32` or `int64`; indices into the
      first dimension of `ref`.
    updates: A `Tensor` of the same type as `ref`; values to reduce
      into `ref`.
    use_locking: An optional `bool`. Defaults to `False`.  If True, the
      update is protected by a lock; otherwise the behavior is undefined,
      but may exhibit less contention.
    name: A name for the operation (optional).

  Returns:
    A mutable `Tensor`. Has the same type as `ref`.
  """
  return gen_state_ops.scatter_max(ref=ref,
                                   indices=indices,
                                   updates=updates,
                                   use_locking=use_locking,
                                   name=name)
@tf_export(v1=["scatter_min"])
def scatter_min(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Reduces sparse updates into a variable reference using the `min` operation.
This operation computes
# Scalar indices
ref[indices, ...] = min(ref[indices, ...], updates[...])
# Vector indices (for each i)
ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...],
updates[i, ..., j, ...])
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the reset value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions combine.
Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape =
[]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png"
alt>
</div>
Args:
ref: A mutable `Tensor`. Must be one of the following types: `half`,
`bfloat16`, `float32`, `float64`, `int32`, `int64`. Should be from a
`Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. A
tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`. A tensor of updated
values to reduce into `ref`.
use_locking: An optional `bool`. Defaults to `False`. If True, the update
will be protected by a lock; otherwise the behavior is undefined, but may
exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
return gen_state_ops.scatter_min(
ref=ref,
indices=indices,
updates=updates,
use_locking=use_locking,
name=name)
@tf_export(v1=["batch_scatter_update"])
@deprecation.deprecated(
"2018-11-29", "Use the batch_scatter_update method of Variable instead.")
def batch_scatter_update(ref, indices, updates, use_locking=True, name=None):
"""Generalization of `tf.scatter_update` to axis different than 0.
Analogous to `batch_gather`. This assumes that `ref`, `indices` and `updates`
have a series of leading dimensions that are the same for all of them, and the
updates are performed on the last dimension of indices. In other words, the
dimensions should be the following:
`num_prefix_dims = indices.ndims - 1`
`batch_dim = num_prefix_dims + 1`
`updates.shape = indices.shape + var.shape[batch_dim:]`
where
`updates.shape[:num_prefix_dims]`
`== indices.shape[:num_prefix_dims]`
`== var.shape[:num_prefix_dims]`
And the operation performed can be expressed as:
`var[i_1, ..., i_n, indices[i_1, ..., i_n, j]] = updates[i_1, ..., i_n, j]`
When indices is a 1D tensor, this operation is equivalent to
`tf.scatter_update`.
To avoid this operation there would be 2 alternatives:
1) Reshaping the variable by merging the first `ndims` dimensions. However,
this is not possible because `tf.reshape` returns a Tensor, which we
cannot use `tf.scatter_update` on.
2) Looping over the first `ndims` of the variable and using
`tf.scatter_update` on the subtensors that result of slicing the first
dimension. This is a valid option for `ndims = 1`, but less efficient than
this implementation.
See also `tf.scatter_update` and `tf.scatter_nd_update`.
Args:
ref: `Variable` to scatter onto.
indices: Tensor containing indices as described above.
updates: Tensor of updates to apply to `ref`.
use_locking: Boolean indicating whether to lock the writing operation.
name: Optional scope name string.
Returns:
Ref to `variable` after it has been modified.
Raises:
ValueError: If the initial `ndims` of `ref`, `indices`, and `updates` are
not the same.
"""
with ops.name_scope(name):
indices = ops.convert_to_tensor(indices, name="indices")
indices_shape = array_ops.shape(indices)
indices_dimensions = indices.get_shape().ndims
if indices_dimensions is None:
raise ValueError("batch_gather does not allow indices with unknown "
"shape.")
nd_indices = array_ops.expand_dims(indices, axis=-1)
nd_indices_list = []
# Scatter ND requires indices to have an additional dimension, in which the
# coordinates of the updated things are specified. For this to be adapted to
# the scatter_update with several leading dimensions, we simply make use of
# a tf.range for all the leading dimensions followed by concat of all the
# coordinates we created with the original indices.
# For example if indices.shape = [2, 3, 4], we should generate the following
# indices for tf.scatter_nd_update:
# nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
# nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
# nd_indices[:, :, 2] = indices
for dimension in range(indices_dimensions - 1):
# In this loop we generate the following for the example (one for each
# iteration).
# nd_indices[:, :, 0] = [[0, 0, 0], [1, 1, 1]]
# nd_indices[:, :, 1] = [[0, 1, 2], [0, 1, 2]]
# This is done at every iteration with a tf.range over the size of the
# i-th dimension and using broadcasting over the desired shape.
dimension_size = indices_shape[dimension]
shape_to_broadcast = [1] * (indices_dimensions + 1)
shape_to_broadcast[dimension] = dimension_size
dimension_range = array_ops.reshape(
gen_math_ops._range(0, dimension_size, 1), shape_to_broadcast)
if dimension_range.dtype.base_dtype != nd_indices.dtype:
dimension_range = gen_math_ops.cast(dimension_range, nd_indices.dtype)
nd_indices_list.append(
dimension_range * array_ops.ones_like(nd_indices))
# Add the original indices at the end, as described above, and concat.
nd_indices_list.append(nd_indices)
final_indices = array_ops.concat(nd_indices_list, axis=-1)
return scatter_nd_update(
ref, final_indices, updates, use_locking=use_locking)
| |
# Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from healing.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
# Per-domain cache of available translation languages, populated lazily by
# get_available_languages().
_AVAILABLE_LANGUAGES = {}

# FIXME(dhellmann): Remove this when moving to oslo.i18n.
# Set to True by enable_lazy() once _() has been switched to build Message
# objects instead of plain translated strings.
USE_LAZY = False
class TranslatorFactory(object):
    """Build translation functions for a single message catalog domain."""

    def __init__(self, domain, lazy=False, localedir=None):
        """Establish a set of translation functions for the domain.

        :param domain: Name of translation domain,
                       specifying a message catalog.
        :type domain: str
        :param lazy: Delays translation until a message is emitted.
                     Defaults to False.
        :type lazy: Boolean
        :param localedir: Directory with translation catalogs.
        :type localedir: str
        """
        self.domain = domain
        self.lazy = lazy
        # Fall back to a domain-specific environment variable
        # (e.g. HEALING_LOCALEDIR) when no catalog directory is given.
        if localedir is None:
            localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
        self.localedir = localedir

    def _make_translation_func(self, domain=None):
        """Return a new translation function ready for use.

        Takes into account whether or not lazy translation is being
        done.

        The domain can be specified to override the default from the
        factory, but the localedir from the factory is always used
        because we assume the log-level translation catalogs are
        installed in the same directory as the main application
        catalog.
        """
        domain = self.domain if domain is None else domain
        if self.lazy:
            # Lazy mode: defer translation by producing Message objects.
            return functools.partial(Message, domain=domain)
        catalog = gettext.translation(
            domain,
            localedir=self.localedir,
            fallback=True,
        )
        return catalog.gettext if six.PY3 else catalog.ugettext

    @property
    def primary(self):
        "The default translation function."
        return self._make_translation_func()

    def _make_log_translation_func(self, level):
        # Log-level catalogs live in per-level domains, e.g. "healing-log-info".
        return self._make_translation_func('%s-log-%s' % (self.domain, level))

    @property
    def log_info(self):
        "Translate info-level log messages."
        return self._make_log_translation_func('info')

    @property
    def log_warning(self):
        "Translate warning-level log messages."
        return self._make_log_translation_func('warning')

    @property
    def log_error(self):
        "Translate error-level log messages."
        return self._make_log_translation_func('error')

    @property
    def log_critical(self):
        "Translate critical-level log messages."
        return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.

# Create the global translation functions for the 'healing' domain.
_translators = TranslatorFactory('healing')

# The primary translation function using the well-known name "_"
_ = _translators.primary

# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical

# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    # FIXME(dhellmann): This function will be removed in oslo.i18n,
    # because the TranslatorFactory makes it superfluous.
    global _, _LI, _LW, _LE, _LC, USE_LAZY
    # Rebind every module-level translator to a lazy factory so callers
    # importing them from this module pick up deferred translation.
    factory = TranslatorFactory('healing', lazy=True)
    _ = factory.primary
    _LI, _LW = factory.log_info, factory.log_warning
    _LE, _LC = factory.log_error, factory.log_critical
    USE_LAZY = True
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # Lazy mode installs a Message-building _ directly into builtins.
        from six import moves
        moves.builtins.__dict__['_'] = TranslatorFactory(domain,
                                                         lazy=True).primary
        return
    # Eager mode: delegate to gettext.install(), honoring the
    # domain-specific localedir override from the environment.
    env_var = '%s_LOCALEDIR' % domain.upper()
    install_kwargs = {'localedir': os.environ.get(env_var)}
    if not six.PY3:
        # Python 2 gettext needs an explicit flag to return unicode.
        install_kwargs['unicode'] = True
    gettext.install(domain, **install_kwargs)
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='healing', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        # Keep enough state (msgid, domain, params) to re-translate later.
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)

        translated_message = translated_message % translated_params

        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Translate a single msgid via gettext for the given domain/locale."""
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]

        # Catalog directory may be overridden per-domain via the environment
        # (e.g. HEALING_LOCALEDIR).
        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext

        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing  we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
        those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
        translated, it will be used as it was when the Message was created
        """
        if other is None:
            # Wrap in a tuple so '%s' % None formats rather than raising.
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        """Snapshot a substitution parameter so later translation sees the
        value as it was at formatting time."""
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    if six.PY2:
        def __str__(self):
            # NOTE(luisg): Logging in python 2.6 tries to str() log records,
            # and it expects specifically a UnicodeError in order to proceed.
            msg = _('Message objects do not support str() because they may '
                    'contain non-ascii characters. '
                    'Please use unicode() or translate() instead.')
            raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    Results are cached per domain in _AVAILABLE_LANGUAGES; a copy of the
    cached list is returned so callers cannot mutate the cache.

    :param domain: the domain to get languages for
    :returns: list of locale identifiers, with 'en_US' first
    """
    if domain in _AVAILABLE_LANGUAGES:
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()

    # PEP 8: use a def, not a lambda assignment, for the helper.
    def find(candidate):
        # A language is "available" when gettext can locate its catalog.
        return gettext.find(domain,
                            localedir=os.environ.get(localedir),
                            languages=[candidate])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()

    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)

    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    # NOTE: the loop variable must not be named 'locale' -- that would shadow
    # the stdlib 'locale' module imported at the top of this file.
    for (supported, alias) in six.iteritems(aliases):
        if supported in language_list and alias not in language_list:
            language_list.append(alias)

    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    if isinstance(obj, Message):
        # Already a translatable Message -- translate it directly.
        return obj.translate(desired_locale)
    # Otherwise obtain the unicode representation first; only Message
    # instances can actually be translated.
    message = six.text_type(obj)
    if isinstance(message, Message):
        return message.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translates all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(value, desired_locale) for value in args)
    if isinstance(args, dict):
        # Translate each value, keys are left untouched.
        return dict((key, translate(value, desired_locale))
                    for (key, value) in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler object
    to forward LogRecord objects to after translating them. This handler
    depends on Message objects being logged, instead of regular strings.

    The handler can be configured declaratively in the logging.conf as follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        # Formatting is performed by the wrapped target handler.
        self.target.setFormatter(fmt)

    def emit(self, record):
        # Snapshot the record's message and args so other handlers that
        # receive the same record are unaffected by the in-place translation.
        saved_msg, saved_args = record.msg, record.args
        try:
            self._translate_and_log_record(record)
        finally:
            record.msg, record.args = saved_msg, saved_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)

        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)

        self.target.emit(record)
| |
#!/usr/bin/env python
#
# ESP32 efuse table generation tool
#
# Converts efuse table to header file efuse_table.h.
#
# Copyright 2017-2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import argparse
import os
import re
import sys
import hashlib
__version__ = '1.0'
quiet = False
max_blk_len = 256
copyright = '''// Copyright 2017-2018 Espressif Systems (Shanghai) PTE LTD
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at",
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License
'''
def status(msg):
    """ Print status message to stderr """
    # Progress output is suppressed entirely in quiet mode.
    if quiet:
        return
    critical(msg)
def critical(msg):
    """ Print critical message to stderr """
    # Write the message and its terminating newline to stderr.
    stream = sys.stderr
    stream.write(msg)
    stream.write('\n')
class FuseTable(list):
    """Ordered list of FuseDefinition rows parsed from a CSV efuse table,
    with helpers to verify the table and render C source / header files.
    """

    def __init__(self):
        # NOTE(review): passing `self` seeds the list from the (still empty)
        # table itself, so this effectively creates an empty list.
        super(FuseTable, self).__init__(self)
        # Hex md5 digest of the table contents; set by calc_md5() and
        # embedded in generated files to detect stale output.
        self.md5_digest_table = ""

    @classmethod
    def from_csv(cls, csv_contents):
        """Parse CSV text into a verified FuseTable.

        Skips blank lines and '#' comments, fills in missing bit_start
        values sequentially per block, lets rows with an empty field name
        inherit the previous row's name, and numbers duplicate names into
        groups. Raises InputError on malformed input.
        """
        res = FuseTable()
        lines = csv_contents.splitlines()

        def expand_vars(f):
            # Expand $ENV_VAR references; any '$name' surviving expansion is
            # an unknown variable and reported as an error.
            f = os.path.expandvars(f)
            m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
            if m:
                raise InputError("unknown variable '%s'" % (m.group(1)))
            return f

        for line_no in range(len(lines)):
            line = expand_vars(lines[line_no]).strip()
            if line.startswith("#") or len(line) == 0:
                continue
            try:
                res.append(FuseDefinition.from_csv(line))
            except InputError as e:
                raise InputError("Error at line %d: %s" % (line_no + 1, e))
            except Exception:
                critical("Unexpected error parsing line %d: %s" % (line_no + 1, line))
                raise

        # fix up missing bit_start: a row without bit_start continues
        # immediately after the previous field in the same efuse block.
        last_efuse_block = None
        for e in res:
            if last_efuse_block != e.efuse_block:
                last_end = 0
            if e.bit_start is None:
                e.bit_start = last_end
            last_end = e.bit_start + e.bit_count
            last_efuse_block = e.efuse_block

        res.verify_duplicate_name()

        # fix up missing field_name: empty names inherit the previous row's
        # name (multi-part fields).
        # NOTE(review): `line_no` here still holds the index of the last CSV
        # line from the loop above, so the reported line number may not point
        # at the offending row.
        last_field = None
        for e in res:
            if e.field_name == "" and last_field is None:
                raise InputError("Error at line %d: %s missing field name" % (line_no + 1, e))
            elif e.field_name == "" and last_field is not None:
                e.field_name = last_field.field_name
            last_field = e

        # fill group: consecutive rows sharing a field name get sequential
        # group numbers so each part can be distinguished later.
        names = [p.field_name for p in res]
        duplicates = set(n for n in names if names.count(n) > 1)
        if len(duplicates) != 0:
            i_count = 0
            for p in res:
                if len(duplicates.intersection([p.field_name])) != 0:
                    p.group = str(i_count)
                    i_count += 1
                else:
                    i_count = 0
        res.verify_duplicate_name()

        # calc md5 for table
        res.calc_md5()
        return res

    def verify_duplicate_name(self):
        """Raise InputError if any field_name+group combination repeats."""
        # check on duplicate name
        names = [p.field_name for p in self]
        duplicates = set(n for n in names if names.count(n) > 1)

        # print sorted duplicate partitions by name
        if len(duplicates) != 0:
            fl_error = False
            for p in self:
                field_name = p.field_name + p.group
                if field_name != "" and len(duplicates.intersection([field_name])) != 0:
                    fl_error = True
                    print("Field at %s, %s, %s, %s have dublicate field_name" %
                          (p.field_name, p.efuse_block, p.bit_start, p.bit_count))
            if fl_error is True:
                raise InputError("Field names must be unique")

    def verify(self, type_table=None):
        """Verify every field, name uniqueness, and that no fields overlap."""
        for p in self:
            p.verify(type_table)

        self.verify_duplicate_name()

        # check for overlaps: after sorting by (block, bit_start), each field
        # must start at or after the end of the previous field of its block.
        last = None
        for p in sorted(self, key=lambda x:(x.efuse_block, x.bit_start)):
            if last is not None and last.efuse_block == p.efuse_block and p.bit_start < last.bit_start + last.bit_count:
                raise InputError("Field at %s, %s, %s, %s overlaps %s, %s, %s, %s" %
                                 (p.field_name, p.efuse_block, p.bit_start, p.bit_count,
                                  last.field_name, last.efuse_block, last.bit_start, last.bit_count))
            last = p

    def calc_md5(self):
        """Compute the md5 digest of the table rows (used in generated files
        to detect when they no longer match the source CSV)."""
        txt_table = ''
        for p in self:
            txt_table += "%s %s %d %s %s" % (p.field_name, p.efuse_block, p.bit_start, str(p.get_bit_count()), p.comment) + "\n"
        self.md5_digest_table = hashlib.md5(txt_table.encode('utf-8')).hexdigest()

    def show_range_used_bits(self):
        """Return a human-readable report of the sorted table and the bit
        ranges used inside each efuse block."""
        # print used and free bits
        rows = ''
        rows += 'Sorted efuse table:\n'
        num = 1
        rows += "{0} \t{1:<30} \t{2} \t{3} \t{4}".format("#", "field_name", "efuse_block", "bit_start", "bit_count") + "\n"
        for p in sorted(self, key=lambda x:(x.efuse_block, x.bit_start)):
            rows += "{0} \t{1:<30} \t{2} \t{3:^8} \t{4:^8}".format(num, p.field_name, p.efuse_block, p.bit_start, p.bit_count) + "\n"
            num += 1

        rows += '\nUsed bits in efuse table:\n'
        # Walk fields in (block, bit_start) order, emitting "[start end]"
        # ranges and starting a new section whenever the block changes or a
        # gap appears between consecutive fields.
        last = None
        for p in sorted(self, key=lambda x:(x.efuse_block, x.bit_start)):
            if last is None:
                rows += '%s \n[%d ' % (p.efuse_block, p.bit_start)
            if last is not None:
                if last.efuse_block != p.efuse_block:
                    rows += '%d] \n\n%s \n[%d ' % (last.bit_start + last.bit_count - 1, p.efuse_block, p.bit_start)
                elif last.bit_start + last.bit_count != p.bit_start:
                    rows += '%d] [%d ' % (last.bit_start + last.bit_count - 1, p.bit_start)
            last = p
        rows += '%d] \n' % (last.bit_start + last.bit_count - 1)
        rows += '\nNote: Not printed ranges are free for using. (bits in EFUSE_BLK0 are reserved for Espressif)\n'
        return rows

    def get_str_position_last_free_bit_in_blk(self, blk):
        """Return (as a string) the first free bit position in block `blk`,
        None if the block is unused, or the field's define (e.g.
        'MAX_BLK_LEN') when a field in the block uses a symbolic length."""
        last_used_bit = 0
        for p in self:
            if p.efuse_block == blk:
                if p.define is not None:
                    # Symbolic length: get_bit_count() returns the define
                    # string itself, which is emitted verbatim into the .c.
                    return p.get_bit_count()
                else:
                    if last_used_bit < p.bit_start + p.bit_count:
                        last_used_bit = p.bit_start + p.bit_count
        if last_used_bit == 0:
            return None
        return str(last_used_bit)

    def to_header(self, file_name):
        """Render the generated C header (extern declarations) as a string."""
        rows = [copyright]
        rows += ["#ifdef __cplusplus",
                 'extern "C" {',
                 "#endif",
                 "",
                 "",
                 "// md5_digest_table " + self.md5_digest_table,
                 "// This file was generated from the file " + file_name + ".csv. DO NOT CHANGE THIS FILE MANUALLY.",
                 "// If you want to change some fields, you need to change " + file_name + ".csv file",
                 "// then run `efuse_common_table` or `efuse_custom_table` command it will generate this file.",
                 "// To show efuse_table run the command 'show_efuse_table'.",
                 "",
                 ""]
        # One extern per distinct field name (grouped parts share one name).
        last_field_name = ''
        for p in self:
            if (p.field_name != last_field_name):
                rows += ["extern const esp_efuse_desc_t* " + "ESP_EFUSE_" + p.field_name + "[];"]
                last_field_name = p.field_name
        rows += ["",
                 "#ifdef __cplusplus",
                 "}",
                 "#endif",
                 ""]
        return '\n'.join(rows) + "\n"

    def to_c_file(self, file_name, debug):
        """Render the generated C source (descriptor tables plus static
        asserts that the used bits fit within the configured block length)."""
        rows = [copyright]
        rows += ['#include "sdkconfig.h"',
                 '#include "esp_efuse.h"',
                 '#include <assert.h>',
                 '#include "' + file_name + '.h"',
                 "",
                 "// md5_digest_table " + self.md5_digest_table,
                 "// This file was generated from the file " + file_name + ".csv. DO NOT CHANGE THIS FILE MANUALLY.",
                 "// If you want to change some fields, you need to change " + file_name + ".csv file",
                 "// then run `efuse_common_table` or `efuse_custom_table` command it will generate this file.",
                 "// To show efuse_table run the command 'show_efuse_table'."]
        rows += [""]
        rows += ["#define MAX_BLK_LEN CONFIG_EFUSE_MAX_BLK_LEN"]
        rows += [""]
        last_free_bit_blk1 = self.get_str_position_last_free_bit_in_blk("EFUSE_BLK1")
        last_free_bit_blk2 = self.get_str_position_last_free_bit_in_blk("EFUSE_BLK2")
        last_free_bit_blk3 = self.get_str_position_last_free_bit_in_blk("EFUSE_BLK3")
        rows += ["// The last free bit in the block is counted over the entire file."]
        if last_free_bit_blk1 is not None:
            rows += ["#define LAST_FREE_BIT_BLK1 " + last_free_bit_blk1]
        if last_free_bit_blk2 is not None:
            rows += ["#define LAST_FREE_BIT_BLK2 " + last_free_bit_blk2]
        if last_free_bit_blk3 is not None:
            rows += ["#define LAST_FREE_BIT_BLK3 " + last_free_bit_blk3]
        rows += [""]
        if last_free_bit_blk1 is not None:
            rows += ['_Static_assert(LAST_FREE_BIT_BLK1 <= MAX_BLK_LEN, "The eFuse table does not match the coding scheme. '
                     'Edit the table and restart the efuse_common_table or efuse_custom_table command to regenerate the new files.");']
        if last_free_bit_blk2 is not None:
            rows += ['_Static_assert(LAST_FREE_BIT_BLK2 <= MAX_BLK_LEN, "The eFuse table does not match the coding scheme. '
                     'Edit the table and restart the efuse_common_table or efuse_custom_table command to regenerate the new files.");']
        if last_free_bit_blk3 is not None:
            rows += ['_Static_assert(LAST_FREE_BIT_BLK3 <= MAX_BLK_LEN, "The eFuse table does not match the coding scheme. '
                     'Edit the table and restart the efuse_common_table or efuse_custom_table command to regenerate the new files.");']
        rows += [""]
        # First pass: one static descriptor array per distinct field name;
        # rows sharing a name become consecutive entries of the same array.
        last_name = ''
        for p in self:
            if (p.field_name != last_name):
                if last_name != '':
                    rows += ["};\n"]
                rows += ["static const esp_efuse_desc_t " + p.field_name + "[] = {"]
                last_name = p.field_name
            rows += [p.to_struct(debug) + ","]
        rows += ["};\n"]
        rows += ["\n\n\n"]
        # Second pass: the public NULL-terminated pointer arrays that index
        # into the static descriptor arrays by group number.
        last_name = ''
        for p in self:
            if (p.field_name != last_name):
                if last_name != '':
                    rows += ["    NULL",
                             "};\n"]
                rows += ["const esp_efuse_desc_t* " + "ESP_EFUSE_" + p.field_name + "[] = {"]
                last_name = p.field_name
            index = str(0) if str(p.group) == "" else str(p.group)
            rows += ["    &" + p.field_name + "[" + index + "],    \t\t// " + p.comment]
        rows += ["    NULL",
                 "};\n"]
        return '\n'.join(rows) + "\n"
class FuseDefinition(object):
    """A single efuse field parsed from one CSV row."""

    def __init__(self):
        self.field_name = ""
        self.group = ""          # group number (string) for multi-part fields
        self.efuse_block = ""    # e.g. "EFUSE_BLK1"
        self.bit_start = None
        self.bit_count = None
        self.define = None       # symbolic length (e.g. "MAX_BLK_LEN"), if any
        self.comment = ""

    @classmethod
    def from_csv(cls, line):
        """ Parse a line from the CSV """
        # Pad with empty cells so rows with omitted trailing columns still
        # split into at least five fields.
        cells = [cell.strip() for cell in (line + ",,,,").split(",")]
        res = FuseDefinition()
        res.field_name = cells[0]
        res.efuse_block = res.parse_block(cells[1])
        res.bit_start = res.parse_num(cells[2])
        res.bit_count = res.parse_bit_count(cells[3])
        if res.bit_count is None or res.bit_count == 0:
            raise InputError("Field bit_count can't be empty")
        res.comment = cells[4]
        return res

    def parse_num(self, strval):
        """Parse an integer cell; empty cells yield None (filled in later)."""
        return None if strval == "" else self.parse_int(strval)

    def parse_bit_count(self, strval):
        """Parse the bit_count cell, resolving the MAX_BLK_LEN symbol."""
        if strval != "MAX_BLK_LEN":
            return self.parse_num(strval)
        # Remember the symbolic name so generated code can emit it verbatim.
        self.define = strval
        return self.get_max_bits_of_block()

    def parse_int(self, v):
        try:
            return int(v, 0)  # base 0 accepts decimal, hex (0x..) and octal
        except ValueError:
            raise InputError("Invalid field value %s" % v)

    def parse_block(self, strval):
        """Validate the efuse_block cell against the known block names."""
        if strval == "":
            raise InputError("Field 'efuse_block' can't be left empty.")
        valid_blocks = ("EFUSE_BLK0", "EFUSE_BLK1", "EFUSE_BLK2", "EFUSE_BLK3")
        if strval not in valid_blocks:
            raise InputError("Field 'efuse_block' should consist from EFUSE_BLK0..EFUSE_BLK3")
        return strval

    def get_max_bits_of_block(self):
        '''common_table: EFUSE_BLK0, EFUSE_BLK1, EFUSE_BLK2, EFUSE_BLK3
           custom_table: ----------, ----------, ----------, EFUSE_BLK3(some reserved in common_table)
        '''
        return 256 if self.efuse_block == "EFUSE_BLK0" else max_blk_len

    def verify(self, type_table):
        """Check required fields and that the field fits inside its block."""
        if self.efuse_block is None:
            raise ValidationError(self, "efuse_block field is not set")
        if self.bit_count is None:
            raise ValidationError(self, "bit_count field is not set")
        if type_table is not None and type_table == "custom_table":
            if self.efuse_block != "EFUSE_BLK3":
                raise ValidationError(self, "custom_table should use only EFUSE_BLK3")
        max_bits = self.get_max_bits_of_block()
        if self.bit_start + self.bit_count > max_bits:
            raise ValidationError(self, "The field is outside the boundaries(max_bits = %d) of the %s block" % (max_bits, self.efuse_block))

    def get_full_name(self):
        """Return the field name with a _PART_<group> suffix when grouped."""
        suffix = "" if self.group == "" else "_PART_" + self.group
        return self.field_name + suffix

    def get_bit_count(self, check_define=True):
        """Return the bit count, or the symbolic define when one is set and
        check_define is True."""
        if check_define is True and self.define is not None:
            return self.define
        return self.bit_count

    def to_struct(self, debug):
        """Render this field as a C struct initializer line."""
        prefix = " {" + '"' + self.field_name + '" ,' if debug is True else " {"
        parts = [prefix + self.efuse_block,
                 str(self.bit_start),
                 str(self.get_bit_count()) + "}, \t // " + self.comment]
        return ", ".join(parts)
def process_input_file(file, type_table):
    """Parse an eFuse CSV file and verify it, returning the FuseTable."""
    status("Parsing efuse CSV input file " + file.name + " ...")
    csv_text = file.read()
    parsed_table = FuseTable.from_csv(csv_text)
    status("Verifying efuse table...")
    parsed_table.verify(type_table)
    return parsed_table
def ckeck_md5_in_file(md5, filename):
    """Return True when the given md5 digest appears on any line of *filename*.

    A missing file counts as "digest not present".
    NOTE: the historical 'ckeck' typo is kept to avoid breaking callers.
    """
    if not os.path.exists(filename):
        return False
    with open(filename, 'r') as existing:
        return any(md5 in line for line in existing)
def create_output_files(name, output_table, debug):
    """Generate <name>.h (under ./include) and <name>.c from output_table.

    Files are only rewritten when the table's md5 digest is absent from the
    existing .c file, so unchanged tables do not touch the sources.
    """
    file_name = os.path.splitext(os.path.basename(name))[0]
    gen_dir = os.path.dirname(name)
    # BUGFIX: gen_dir + "/include" produced the absolute path "/include"
    # when *name* had no directory component; os.path.join keeps it relative.
    dir_for_file_h = os.path.join(gen_dir, "include")
    if not os.path.isdir(dir_for_file_h):
        os.mkdir(dir_for_file_h)
    file_h_path = os.path.join(dir_for_file_h, file_name + ".h")
    file_c_path = os.path.join(gen_dir, file_name + ".c")
    # src files are the same
    if ckeck_md5_in_file(output_table.md5_digest_table, file_c_path) is False:
        status("Creating efuse *.h file " + file_h_path + " ...")
        output = output_table.to_header(file_name)
        with open(file_h_path, 'w') as f:
            f.write(output)
        status("Creating efuse *.c file " + file_c_path + " ...")
        output = output_table.to_c_file(file_name, debug)
        with open(file_c_path, 'w') as f:
            f.write(output)
    else:
        print("Source files do not require updating correspond to csv file.")
def main():
    """Command-line entry point: parse CSV table(s), verify, emit .h/.c files."""
    global quiet
    global max_blk_len
    parser = argparse.ArgumentParser(description='ESP32 eFuse Manager')
    parser.add_argument('--quiet', '-q', help="Don't print non-critical status messages to stderr", action='store_true')
    # BUGFIX: was action="store_false" with default=False, which meant that
    # passing --debug could never actually enable the debug output.
    parser.add_argument('--debug', help='Create header file with debug info', default=False, action="store_true")
    parser.add_argument('--info', help='Print info about range of used bits', default=False, action="store_true")
    parser.add_argument('--max_blk_len', help='Max number of bits in BLK1, BLK2 and BLK3', type=int, default=256)
    parser.add_argument('common_input', help='Path to common CSV file to parse.', type=argparse.FileType('r'))
    parser.add_argument('custom_input', help='Path to custom CSV file to parse.', type=argparse.FileType('r'), nargs='?', default=None)
    args = parser.parse_args()
    max_blk_len = args.max_blk_len
    print("Max number of bits in BLK %d" % (max_blk_len))
    if max_blk_len not in [256, 192, 128]:
        raise InputError("Unsupported block length = %d" % (max_blk_len))
    quiet = args.quiet
    debug = args.debug
    info = args.info
    common_table = process_input_file(args.common_input, "common_table")
    two_table = common_table
    if args.custom_input is not None:
        custom_table = process_input_file(args.custom_input, "custom_table")
        two_table += custom_table
        # Re-verify the merged table: common and custom fields must not overlap.
        two_table.verify()
    # save files.
    if info is False:
        if args.custom_input is None:
            create_output_files(args.common_input.name, common_table, debug)
        else:
            create_output_files(args.custom_input.name, custom_table, debug)
    else:
        print(two_table.show_range_used_bits())
    return 0
class InputError(RuntimeError):
    """Raised when the eFuse CSV input cannot be parsed."""
    def __init__(self, e):
        super(InputError, self).__init__(e)
class ValidationError(InputError):
    """Raised when a parsed eFuse table entry violates a constraint."""
    def __init__(self, p, message):
        detail = "Entry %s invalid: %s" % (p.field_name, message)
        super(ValidationError, self).__init__(detail)
if __name__ == '__main__':
    try:
        main()
    except InputError as e:
        # Input problems are reported on stderr; exit code 2 signals bad input.
        print(e, file=sys.stderr)
        sys.exit(2)
| |
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import shutil
import sys
import tempfile
import uuid
from muranoclient.common import exceptions as muranoclient_exc
import six
from murano.common import config
from murano.common.i18n import _LE
from murano.dsl import exceptions
from murano.engine import yaql_yaml_loader
from murano.openstack.common import log as logging
from murano.packages import exceptions as pkg_exc
from murano.packages import load_utils
LOG = logging.getLogger(__name__)
class PackageLoader(six.with_metaclass(abc.ABCMeta)):
    """Abstract base for murano package loaders."""
    @abc.abstractmethod
    def get_package(self, name):
        """Return the package with the fully qualified name *name*."""
    @abc.abstractmethod
    def get_package_by_class(self, name):
        """Return the package that defines the MuranoPL class *name*."""
class ApiPackageLoader(PackageLoader):
    # Loads murano packages from the API catalog, caching downloaded and
    # extracted packages in a per-instance temporary directory.
    # NOTE: this module uses Python 2 three-expression `raise` statements to
    # preserve the original traceback while re-raising as a domain exception.
    def __init__(self, murano_client_factory, tenant_id):
        self._cache_directory = self._get_cache_directory()
        self._murano_client_factory = murano_client_factory
        self.tenant_id = tenant_id
    def get_package_by_class(self, name):
        # Resolve the package that defines MuranoPL class *name*;
        # raises NoPackageForClassFound when the catalog has no match.
        filter_opts = {'class_name': name}
        try:
            package_definition = self._get_definition(filter_opts)
        except LookupError:
            exc_info = sys.exc_info()
            raise exceptions.NoPackageForClassFound(name), None, exc_info[2]
        return self._get_package_by_definition(package_definition)
    def get_package(self, name):
        # Resolve a package by its fully qualified name;
        # raises NoPackageFound when the catalog has no match.
        filter_opts = {'fqn': name}
        try:
            package_definition = self._get_definition(filter_opts)
        except LookupError:
            exc_info = sys.exc_info()
            raise exceptions.NoPackageFound(name), None, exc_info[2]
        return self._get_package_by_definition(package_definition)
    @staticmethod
    def _get_cache_directory():
        # A fresh uuid-named subdirectory per loader instance, under either
        # the configured cache root or the system temp directory.
        base_directory = (
            config.CONF.packages_opts.packages_cache or
            os.path.join(tempfile.gettempdir(), 'murano-packages-cache')
        )
        directory = os.path.abspath(os.path.join(base_directory,
                                                 str(uuid.uuid4())))
        os.makedirs(directory)
        LOG.debug('Cache for package loader is located at: %s' % directory)
        return directory
    def _get_definition(self, filter_opts):
        # Query the catalog; raises LookupError for "no match" and for
        # HTTP failures alike so callers have a single error path.
        filter_opts['catalog'] = True
        try:
            packages = list(self._murano_client_factory().packages.filter(
                **filter_opts))
            if len(packages) > 1:
                LOG.debug('Ambiguous package resolution: '
                          'more then 1 package found for query "{0}", '
                          'will resolve based on the ownership'.
                          format(filter_opts))
                # Tenant-owned beats public beats any other match.
                return get_best_package_match(packages, self.tenant_id)
            elif len(packages) == 1:
                return packages[0]
            else:
                LOG.debug('There are no packages matching filter '
                          '{0}'.format(filter_opts))
                raise LookupError()
        except muranoclient_exc.HTTPException:
            LOG.debug('Failed to get package definition from repository')
            raise LookupError()
    def _get_package_by_definition(self, package_def):
        # Serve from the on-disk cache when possible; otherwise download,
        # extract into the cache and clean the temp file up afterwards.
        package_id = package_def.id
        package_name = package_def.fully_qualified_name
        package_directory = os.path.join(self._cache_directory, package_name)
        if os.path.exists(package_directory):
            try:
                return load_utils.load_from_dir(
                    package_directory, preload=True,
                    loader=yaql_yaml_loader.YaqlYamlLoader)
            except pkg_exc.PackageLoadError:
                # Corrupt cache entry: remove it and fall through to download.
                LOG.exception(_LE(
                    'Unable to load package from cache. Clean-up...'))
                shutil.rmtree(package_directory, ignore_errors=True)
        try:
            package_data = self._murano_client_factory().packages.download(
                package_id)
        except muranoclient_exc.HTTPException as e:
            msg = 'Error loading package id {0}: {1}'.format(
                package_id, str(e)
            )
            exc_info = sys.exc_info()
            raise pkg_exc.PackageLoadError(msg), None, exc_info[2]
        package_file = None
        try:
            with tempfile.NamedTemporaryFile(delete=False) as package_file:
                package_file.write(package_data)
            return load_utils.load_from_file(
                package_file.name,
                target_dir=package_directory,
                drop_dir=False,
                loader=yaql_yaml_loader.YaqlYamlLoader
            )
        except IOError:
            msg = 'Unable to extract package data for %s' % package_id
            exc_info = sys.exc_info()
            raise pkg_exc.PackageLoadError(msg), None, exc_info[2]
        finally:
            # The temp archive is no longer needed once extracted (or failed).
            try:
                if package_file:
                    os.remove(package_file.name)
            except OSError:
                pass
    def cleanup(self):
        # Drop the whole per-instance cache directory.
        shutil.rmtree(self._cache_directory, ignore_errors=True)
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Context-manager use guarantees the cache is cleaned up on exit.
        self.cleanup()
        return False
class DirectoryPackageLoader(PackageLoader):
    """Loads murano packages from subdirectories of a local base path."""
    def __init__(self, base_path):
        self._base_path = base_path
        self._processed_entries = set()
        self._packages_by_class = {}
        self._packages_by_name = {}
        self._build_index()
    def get_package(self, name):
        """Look up a package by full name; None when absent."""
        return self._packages_by_name.get(name)
    def get_package_by_class(self, name):
        """Look up a package by one of its class names; None when absent."""
        return self._packages_by_class.get(name)
    def _build_index(self):
        """Scan base_path once and index packages by name and by class."""
        for entry in os.listdir(self._base_path):
            candidate = os.path.join(self._base_path, entry)
            if not os.path.isdir(candidate):
                continue
            if entry in self._processed_entries:
                continue
            try:
                loaded = load_utils.load_from_dir(
                    candidate, preload=True,
                    loader=yaql_yaml_loader.YaqlYamlLoader)
            except pkg_exc.PackageLoadError:
                LOG.exception(_LE('Unable to load package from path: '
                                  '{0}').format(entry))
                continue
            for class_name in loaded.classes:
                self._packages_by_class[class_name] = loaded
            self._packages_by_name[loaded.full_name] = loaded
            self._processed_entries.add(entry)
def get_best_package_match(packages, tenant_id):
    """Pick the most appropriate package for a tenant.

    Priority: a package owned by the tenant wins immediately; otherwise the
    last public package seen; otherwise the first non-public one; else None.
    """
    public_pkg = None
    fallback = []
    for candidate in packages:
        if candidate.owner_id == tenant_id:
            return candidate
        if candidate.is_public:
            public_pkg = candidate
        else:
            fallback.append(candidate)
    if public_pkg is not None:
        return public_pkg
    return fallback[0] if fallback else None
| |
#!/usr/bin/env python
# encoding: utf-8
"""
Copyright (c) 2010 The Echo Nest. All rights reserved.
Created by Tyler Williams on 2010-04-25.
Utility functions to support the Echo Nest web API interface.
"""
import urllib
import urllib2
import httplib
import config
import logging
import socket
import re
import time
import os
import subprocess
import traceback
from types import StringType, UnicodeType
try:
import json
except ImportError:
import simplejson as json
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Two-letter Echo Nest ID prefixes mapped to their object type names.
TYPENAMES = (
    ('AR', 'artist'),
    ('SO', 'song'),
    ('RE', 'release'),
    ('TR', 'track'),
    ('PE', 'person'),
    ('DE', 'device'),
    ('LI', 'listener'),
    ('ED', 'editor'),
    ('TW', 'tweditor'),
    ('CA', 'catalog'),
)
# Foreign IDs like "<catalog>:<type>:<id>" with an optional "^score" suffix.
foreign_regex = re.compile(r'^.+?:(%s):([^^]+)\^?([0-9\.]+)?' % r'|'.join(n[1] for n in TYPENAMES))
# Bare 18-character IDs like "ARxxxxxxxxxxxxxxxx", optional "^score" suffix.
short_regex = re.compile(r'^((%s)[0-9A-Z]{16})\^?([0-9\.]+)?' % r'|'.join(n[0] for n in TYPENAMES))
# Long-form "music://id.echonest.com/.../AR/ARxxxx..." identifiers.
long_regex = re.compile(r'music://id.echonest.com/.+?/(%s)/(%s)[0-9A-Z]{16}\^?([0-9\.]+)?' % (r'|'.join(n[0] for n in TYPENAMES), r'|'.join(n[0] for n in TYPENAMES)))
# Default headers sent with every API request.
headers = [('User-Agent', 'Pyechonest %s' % (config.__version__,))]
class MyBaseHandler(urllib2.BaseHandler):
    # Runs before the other handlers: optionally logs the URL and stamps the
    # request with a start time so MyErrorProcessor can report call duration.
    def default_open(self, request):
        if config.TRACE_API_CALLS:
            logger.info("%s" % (request.get_full_url(),))
        request.start_time = time.time()
        # Returning None lets the handler chain continue to open the URL.
        return None
class MyErrorProcessor(urllib2.HTTPErrorProcessor):
    """Error processor that passes API-level error responses through.

    The Echo Nest API returns details for 400/403/500 in the response body,
    so those responses are handed back to the caller instead of raising.
    """
    def http_response(self, request, response):
        code = response.code
        if config.TRACE_API_CALLS:
            logger.info("took %2.2fs: (%i)" % (time.time()-request.start_time,code))
        if code in [200, 400, 403, 500]:
            return response
        else:
            # BUGFIX: the superclass result was previously dropped, making
            # this method silently return None for any other status code.
            return urllib2.HTTPErrorProcessor.http_response(self, request, response)
# Shared opener: logs request timing and passes API error bodies through.
opener = urllib2.build_opener(MyBaseHandler(), MyErrorProcessor())
opener.addheaders = headers
class EchoNestAPIError(Exception):
    """Raised when the Echo Nest API reports a non-zero status code."""
    def __init__(self, code, message):
        formatted = 'Echo Nest API Error %d: %s' % (code, message)
        self.args = (formatted,)
def get_successful_response(raw_json):
    """Decode an API reply, raising EchoNestAPIError on a non-zero status.

    On success the status sub-dict is removed and the remaining payload is
    returned as a plain dict.
    """
    try:
        payload = json.loads(raw_json)
        status_info = payload['response']['status']
        code = int(status_info['code'])
        message = status_info['message']
        if code:
            # Non-zero status: surface the API-provided message.
            raise EchoNestAPIError(code, message)
        del payload['response']['status']
        return payload
    except ValueError:
        # Either the JSON failed to parse or the code was not an int.
        logger.debug(traceback.format_exc())
        raise EchoNestAPIError(-1, "Unknown error.")
# These two functions are to deal with the unknown encoded output of codegen (varies by platform and ID3 tag)
def reallyunicode(s, encoding="utf-8"):
    # Try a sequence of codecs until one decodes; the last attempt uses
    # ('ascii', 'replace') so undecodable bytes are substituted rather than
    # failing.  Raises ValueError when the input is not a string at all.
    if type(s) is StringType:
        for args in ((encoding,), ('utf-8',), ('latin-1',), ('ascii', 'replace')):
            try:
                s = s.decode(*args)
                break
            except UnicodeDecodeError:
                continue
    if type(s) is not UnicodeType:
        raise ValueError, "%s is not a string at all." % s
    return s
def reallyUTF8(s):
    # Normalize any byte/unicode string to utf-8 encoded bytes.
    return reallyunicode(s).encode("utf-8")
def codegen(filename, start=0, duration=30):
    """Run the codegen binary on *filename* and return the parsed JSON.

    If start or duration is negative it is omitted from the command line.
    Returns None when codegen produces no valid JSON.
    """
    cmd = config.CODEGEN_BINARY_OVERRIDE
    if not cmd:
        # Is this is posix platform, or is it windows?
        if hasattr(os, 'uname'):
            if(os.uname()[0] == "Darwin"):
                cmd = "codegen.Darwin"
            else:
                cmd = 'codegen.'+os.uname()[0]+'-'+os.uname()[4]
        else:
            cmd = "codegen.windows.exe"
    if not os.path.exists(cmd):
        raise Exception("Codegen binary not found.")
    # SECURITY FIX: build an argument list and avoid shell=True so filenames
    # containing shell metacharacters cannot inject commands (the old code
    # interpolated the filename into a shell string).
    command = [cmd, filename]
    if start >= 0:
        command.append(str(start))
    if duration >= 0:
        command.append(str(duration))
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (json_block, errs) = p.communicate()
    json_block = reallyUTF8(json_block)
    try:
        return json.loads(json_block)
    except ValueError:
        logger.debug("No JSON object came out of codegen: error was %s" % (errs))
        return None
def callm(method, param_dict, POST=False, socket_timeout=None, data=None):
    """
    Call the api!
    Param_dict is a *regular* *python* *dictionary* so if you want to have multi-valued params
    put them in a list.
    ** note, if we require 2.6, we can get rid of this timeout munging.
    """
    param_dict['api_key'] = config.ECHO_NEST_API_KEY
    param_list = []
    if not socket_timeout:
        socket_timeout = config.CALL_TIMEOUT
    # Flatten multi-valued params into repeated (key, value) pairs and
    # utf-8 encode unicode values before urlencoding.
    for key,val in param_dict.iteritems():
        if isinstance(val, list):
            param_list.extend( [(key,subval) for subval in val] )
        else:
            if isinstance(val, unicode):
                val = val.encode('utf-8')
            param_list.append( (key,val) )
    params = urllib.urlencode(param_list)
    # The timeout is applied process-wide (pre-2.6 urllib2 had no per-call
    # timeout); it is reset to None before returning.
    socket.setdefaulttimeout(socket_timeout)
    if(POST):
        if (not method == 'track/upload') or ((method == 'track/upload') and 'url' in param_dict):
            """
            this is a normal POST call
            """
            url = 'http://%s/%s/%s/%s' % (config.API_HOST, config.API_SELECTOR,
                                        config.API_VERSION, method)
            if data is None:
                data = ''
            data = urllib.urlencode(data)
            data = "&".join([data, params])
            f = opener.open(url, data=data)
        else:
            """
            upload with a local file is special, as the body of the request is the content of the file,
            and the other parameters stay on the URL
            """
            url = '/%s/%s/%s?%s' % (config.API_SELECTOR, config.API_VERSION,
                                    method, params)
            if ':' in config.API_HOST:
                host, port = config.API_HOST.split(':')
            else:
                host = config.API_HOST
                port = 80
            if config.TRACE_API_CALLS:
                logger.info("%s/%s" % (host+':'+str(port), url,))
            conn = httplib.HTTPConnection(host, port = port)
            conn.request('POST', url, body = data, headers = dict([('Content-Type', 'application/octet-stream')]+headers))
            f = conn.getresponse()
    else:
        """
        just a normal GET call
        """
        url = 'http://%s/%s/%s/%s?%s' % (config.API_HOST, config.API_SELECTOR, config.API_VERSION,
                                        method, params)
        f = opener.open(url)
    socket.setdefaulttimeout(None)
    # try/except
    # get_successful_response raises EchoNestAPIError for non-zero statuses.
    response_dict = get_successful_response(f.read())
    return response_dict
def postChunked(host, selector, fields, files):
    """Upload *files* to http://host<selector> with *fields* as query params.

    Near drop-in replacement for postMultipart(): the files tuples no longer
    carry a filename, and only the response body is returned.  Relies on the
    urllib2_file.py opener (originally from http://fabien.seisen.org, drawing
    on http://code.activestate.com/recipes/146306/), which streams from file
    pointers in chunks and works through proxies.
    """
    query = urllib.urlencode(fields)
    url = 'http://%s%s?%s' % (host, selector, query)
    response = urllib2.urlopen(url, files)
    body = response.read()
    # Close every uploaded file handle once the transfer has completed.
    for (key, fp) in files:
        fp.close()
    return body
def fix(x):
    """Return a copy of dict *x* with every key coerced to str.

    json decoding yields unicode keys, which cannot be used as **kwargs
    names in Python 2; this normalizes them.
    Raises AssertionError when *x* is not a dict.
    """
    if not isinstance(x, dict):
        # Explicit raise instead of `assert`, which is stripped under -O.
        raise AssertionError("expected a dict, got %r" % (x,))
    # items() works on both Python 2 and 3 (iteritems was py2-only).
    return dict((str(k), v) for (k, v) in x.items())
| |
import matplotlib.pyplot as plt
import sys
import os
import textwrap
#from TamuzApp.models import *
import gc
lib_path = os.path.abspath(r'E:\Tamuz\Utils\RobotQAUtils')
sys.path.append(lib_path)
from Utils.RobotQAUtils.plateReader import *
from Utils.RobotQAUtils.classes import *
from Utils.DBUtil.FilesIO.saveFileToModel import *
from matplotlib.font_manager import FontProperties
width = 0.35  # bar width shared by all charts in this module
plt.ioff()  # non-interactive mode: figures are saved/shown explicitly
fig = None  # lazily-created figure reused across the web-app report calls
def printPipetorsReport(dilutionStatistic):
    """Plot mean readings and max deviation per pipetor vs. the manual column.

    BUGFIX: xs1 and rects1 were only defined in commented-out code, so the
    original raised NameError at ax.bar(xs1, ...) and at the legend call.
    """
    manualRead = dilutionStatistic.manualColumn.getMean()
    robotRead = dilutionStatistic.getRobotMean()
    N = len(dilutionStatistic.pipetors)
    means = []
    maxDeviation = []
    for p in dilutionStatistic.pipetors:
        means.append(p.getMeans())
        maxDeviation.append(p.getMaxDeviationPercent(robotRead))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Slot 0 is reserved for the manual column; pipetors follow.
    xs1 = range(N + 1)
    ys2 = [0]
    ys2.extend(means)
    rects2 = ax.bar(xs1, ys2, width, color='y', align='center')
    ys3 = [dilutionStatistic.manualColumn.getMaxDeviationPercent(manualRead)]
    ys3.extend(maxDeviation)
    xs3 = [x + width for x in range(N + 1)]
    # normalize deviation values of the deviation percent to the size of manual reading
    for idx, dev in enumerate(ys3):
        ys3[idx] = float(robotRead) * float(ys3[idx])
    rects3 = ax.bar(xs3, ys3, width, color='r', align='center')
    for i, w in enumerate(dilutionStatistic.pipetors[0].wells):
        dils = []
        dils.append(dilutionStatistic.manualColumn.wells[i].dilutionValue)
        for p in dilutionStatistic.pipetors:
            dils.append(p.wells[i].dilutionValue)
        p = plt.plot(range(N + 1), dils, 'ro', label='dilution values')
    ax.set_ylabel('reading')
    ax.set_xlabel('pipetors')
    ax.set_title('readings and volume of pipetors with '+str(dilutionStatistic.manualColumn.colorVolume)+' volume')
    rowLabels = plateReader.createRowLabels(length=N + 1, name='')
    rowLabels.insert(0, 'manual')
    rowLabels = [textwrap.fill(text, 15) for text in rowLabels]
    rowLabels2 = []
    for ind, num in enumerate(ys3):
        n = round(num * 100 / robotRead)  # unNormalize back to a percentage
        rowLabels2.append(str(n) + ' %')
    plt.xticks(range(N + 1), rowLabels)
    plt.xticks([x + width for x in range(N + 1)], rowLabels2)
    # rects1 (manual-reading bars) was commented out, so it is not in the legend.
    ax.legend((rects2[0], rects3[0], p[0]),
              ('reading in robot pipeting', 'maximum deviation percent', 'dilution values'))
    plt.show()
def printPipetorsCV(dilutionStatistic):
    """Plot per-pipetor mean volumes and CV values against the manual column.

    BUGFIX: the original called fig.subplot(), which does not exist on a
    matplotlib Figure (AttributeError); add_subplot(111) is the correct call.
    Also removes dead local computations (normVal/robvol were never used).
    """
    manualRead = plateReader.getVolumeFromReading(dilutionStatistic.manualColumn.getMean())
    N = len(dilutionStatistic.pipetors)
    means = []
    cv = [plateReader.getCV(dilutionStatistic.manualColumn.wells, dilutionStatistic.manualColumn.getMean())]
    for p in dilutionStatistic.pipetors:
        means.append(plateReader.getVolumeFromReading(p.getMeans()))
        cv.append(plateReader.getCV(p.wells, p.getMeans()))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Slot 0: manual column; slots 1..N: pipetors.
    ys1 = [manualRead]
    ys1.extend([0] * N)
    xs1 = range(N + 1)
    rects1 = ax.bar(xs1, ys1, width, color='g', align='center')
    ys2 = [0]
    ys2.extend(means)
    rects2 = ax.bar(xs1, ys2, width, color='y', align='center')
    ys3 = cv
    xs3 = [x + width for x in range(N + 1)]
    rects3 = ax.bar(xs3, ys3, width, color='r', align='center')
    for i, w in enumerate(dilutionStatistic.pipetors[0].wells):
        dils = []
        dils.append(plateReader.getVolumeFromReading(dilutionStatistic.manualColumn.wells[i].dilutionValue))
        for p in dilutionStatistic.pipetors:
            dils.append(plateReader.getVolumeFromReading(p.wells[i].dilutionValue))
        p = plt.plot(range(N + 1), dils, 'ro', label='dilution values')
    ax.set_ylabel('volume')
    ax.set_xlabel('pipetors')
    ax.set_title('readings and volume of pipetors with '+str(dilutionStatistic.manualColumn.colorVolume)+' volume')
    rowLabels = plateReader.createRowLabels(length=N + 1, name='')
    rowLabels.insert(0, 'manual')
    rowLabels = [textwrap.fill(text, 15) for text in rowLabels]
    rowLabels2 = [str(round(num * 100)) for num in ys3]
    plt.xticks(range(N + 1), rowLabels)
    plt.xticks([x + width for x in range(N + 1)], rowLabels2)
    ax.legend((rects2[0], rects3[0], p[0]), ('reading in robot pipeting', 'cv value', 'dilution values'))
    plt.show()
def printPipetorsCVForWebbApp(robotExperiment, manualExcelFile, exp=None, plateReaderUsed='old'):
    """Build the CV report chart for the web app and save it as report.png.

    Returns the full path of the saved image.
    BUGFIX: the original saved to dir + '/report.png' but returned
    dir + 'report.png' (missing separator); os.path.join is now used so the
    returned path matches the file actually written.
    """
    global fig
    manualRead = plateReader.getVolumeFromReading(robotExperiment.getPlateODMean(robotExperiment.manualPlate), plateReaderUsed)
    N = len(robotExperiment.pipetors)
    means = []
    manualWellList = []
    for col in robotExperiment.manualPlate.columns:
        manualWellList.extend(col.wells)
    cv = [plateReader.getCV(manualWellList, robotExperiment.getPlateODMean(robotExperiment.manualPlate))]
    for p in robotExperiment.pipetors:
        means.append(plateReader.getVolumeFromReading(p.getMeans()))
        cv.append(plateReader.getCV(p.wells, p.getMeans()))
    if not fig:
        fig = plt.figure()
    ax = fig.add_subplot(111)
    ys1 = [manualRead]
    ys1.extend([0] * N)
    xs1 = range(N + 1)
    rects1 = plt.bar(xs1, ys1, width, color='g', align='center')
    ys2 = [0]
    ys2.extend(means)
    rects2 = plt.bar(xs1, ys2, width, color='y', align='center')
    ys3 = cv
    xs3 = [x + width for x in range(N + 1)]
    rects3 = plt.bar(xs3, ys3, width, color='r', align='center')
    for i, w in enumerate(robotExperiment.pipetors[0].wells):
        dils = []
        if i < len(manualWellList):
            dils.append(plateReader.getVolumeFromReading(manualWellList[i].dilutionValue))
        else:
            dils.append(manualRead)
        for j, p in enumerate(robotExperiment.pipetors):
            dils.append(plateReader.getVolumeFromReading(p.wells[i].dilutionValue))
        p = plt.plot(range(N + 1), dils, 'ro', label='dilution values')
    plt.ylabel('volume')
    plt.xlabel('pipetors')
    plt.title('readings and volume of pipetors with '+str(robotExperiment.volume)+' volume')
    rowLabels = plateReader.createRowLabels(length=N + 1, name='')
    rowLabels.insert(0, 'manual')
    rowLabels = [textwrap.fill(text, 15) for text in rowLabels]
    rowLabels2 = [str(round(num * 100)) for num in ys3]
    plt.xticks(range(N + 1), rowLabels)
    plt.xticks([x + width for x in range(N + 1)], rowLabels2)
    plt.legend((rects2[0], rects3[0], p[0]), ('reading in robot pipeting', 'cv value', 'dilution values'), bbox_to_anchor=(1, 0.5))
    report_dir = getFileDir(str(manualExcelFile))
    report_path = os.path.join(report_dir, 'report.png')
    plt.savefig(report_path, dpi=600)
    saveFileToModel(exp, report_dir, 'report.png', key='report1')
    plt.close(fig)
    plt.cla()
    plt.clf()
    gc.collect()
    return report_path
def printSingleWellsPipetors(robotExperiment, manualExcelFile, exp=None, plateReaderUsed='old'):
    """Plot each well's volume per pipetor and save pipetorsReport.png.

    Returns the full path of the saved image.
    BUGFIX: add_subplot(110) is an invalid subplot spec (must be 111 here),
    and the returned path was missing the directory separator that the saved
    path had; os.path.join fixes both call sites consistently.
    """
    global fig
    manualWellList = []
    for col in robotExperiment.manualPlate.columns:
        manualWellList.extend(col.wells)
    if not fig:
        fig = plt.figure()
    ax = fig.add_subplot(111)
    matrix = []
    ps = []
    for i, p in enumerate(robotExperiment.pipetors):
        matrix.append([])
        for w in p.wells:  # iterating each pipetor's well list
            matrix[i].append(plateReader.getVolumeFromReading(w.dilutionValue, plateReaderUsed))
        ps.append(plt.plot(range(len(matrix[i])), matrix[i], '-', label='p' + str(i + 1)))
    ax.set_ylabel('volume')
    ax.set_xlabel('wells')
    ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), mode="expand", borderaxespad=0., loc=3, ncol=8)
    report_dir = getFileDir(str(manualExcelFile))
    report_path = os.path.join(report_dir, 'pipetorsReport.png')
    plt.savefig(report_path)
    saveFileToModel(exp, report_dir, 'pipetorsReport.png', key='report2')
    plt.close(fig)
    plt.cla()
    plt.clf()
    gc.collect()
    return report_path
| |
import json
import math
import socket
import logging
import asyncio
import aiohttp
from enum import Enum
from random import uniform
from nyuki.services import Service
from nyuki.api import Response, resource
log = logging.getLogger(__name__)
class State(Enum):
    """Raft node roles; an instance is in exactly one state at a time."""
    UNKNOWN = 'unknown'
    FOLLOWER = 'follower'
    CANDIDATE = 'candidate'
    LEADER = 'leader'
class Event(Enum):
    """Protocol events that callbacks can be registered for."""
    ELECTED = 'elected'
    DISMISSED = 'dismissed'
    FAILURES = 'failures'
@resource('/raft', ['v1'], 'application/json')
class ApiRaft:
    """
    This interface enables communication between members of a Raft cluster.
    """
    async def put(self, request):
        """
        Raft candidate request.
        Grants this instance's vote unless it has already voted this term.
        """
        proto = self.nyuki.raft
        # If this instance has already voted for another one
        if proto.voted_for:
            return Response(
                status=403,
                body={'voted': proto.voted_for, 'instance': proto.uid}
            )
        # Local variables
        data = await request.json()
        proto.voted_for = data['candidate']
        proto.term = data['term']
        # Reset the timer
        proto.set_timer(proto.candidate)
        return Response(status=200, body={'instance': proto.uid})
    async def post(self, request):
        """
        Heartbeat endpoint.
        Demotes this instance to follower, replicates the leader's log and
        reports locally-suspected instances back to the leader.
        """
        proto = self.nyuki.raft
        data = await request.json()
        # NOTE(review): 'suspicious' aliases proto.suspicious, which is
        # cleared below before list() is taken — so the reported list is
        # always empty; a snapshot (set(proto.suspicious)) was likely
        # intended. Confirm before changing.
        suspicious = proto.suspicious
        # Local variables
        proto.state = State.FOLLOWER
        proto.votes = 0
        proto.voted_for = None
        proto.log = data['log']
        proto.suspicious.clear()
        # Reset the timer
        proto.set_timer(proto.candidate)
        return Response(status=200, body={
            'instance': proto.uid,
            'suspicious': list(suspicious)
        })
class RaftProtocol(Service):
"""
Leader election based on Raft distributed algorithm.
Paper: https://raft.github.io/raft.pdf
"""
HEARTBEAT = 1.0
TIMEOUT = (2.0, 3.5)
def __init__(self, nyuki):
self.service = nyuki.config['service']
self.loop = nyuki.loop or asyncio.get_event_loop()
self.uid = nyuki.id
self.ipv4 = socket.gethostbyname(socket.gethostname())
self.handlers = {event: set() for event in Event}
self.cluster = {}
self.suspicious = DanausSet(after=5, callback=self.failure_handler)
self.timer = None
self.state = State.UNKNOWN
self.term = -1
self.votes = -1
self.voted_for = None
self.majority = math.inf
self.log = {}
@property
def network(self):
return {**self.cluster, self.ipv4: self.uid}
def configure(self, *args, **kwargs):
pass
def register(self, etype, callback):
self.handlers[Event(etype)].add(callback)
def set_timer(self, cb, factor=1):
"""
Set or reset a unique timer.
"""
if self.timer:
self.timer.cancel()
self.timer = self.loop.call_later(
uniform(*self.TIMEOUT) * factor, asyncio.ensure_future, cb()
)
@staticmethod
async def request(ipv4, method, data=None):
"""
Utility method to perform HTTP requests, Raft-specific, to an instance.
"""
request = {
'url': 'http://{host}:5558/v1/raft'.format(host=ipv4),
'headers': {'Content-Type': 'application/json'},
'data': json.dumps(data or {})
}
try:
async with aiohttp.ClientSession() as session:
http_method = getattr(session, method)
async with http_method(**request) as resp:
if resp.status != 200:
return
return await resp.json()
except (aiohttp.ClientError, ConnectionError):
return
async def start(self, *args, **kwargs):
"""
Starts the protocol as a follower instance.
"""
self.state = State.FOLLOWER
self.term = 0
self.votes = 0
# Won't bootstrap the timer here to avoid any unwanted early election
async def stop(self, *args, **kwargs):
"""
Stops the protocol by cancelling the current timer.
"""
self.state = State.FOLLOWER
if self.timer:
self.timer.cancel()
async def discovery_handler(self, addresses):
"""
The discovery service provides updates periodically.
"""
cluster = {ipv4: self.cluster.get(ipv4) for ipv4 in addresses}
if self.ipv4 in cluster:
del cluster[self.ipv4]
else:
log.warning("This instance isn't part of the discovery results")
await self.stop()
return
# Check differences
added = set(cluster.keys()) - set(self.cluster.keys())
self.suspicious.update([
(ipv4, self.cluster[ipv4] or self.log.get(ipv4))
for ipv4 in set(self.cluster.keys()) - set(cluster.keys())
])
self.cluster = cluster
if self.state is State.LEADER:
# Schedule HB for new workers
for ipv4 in added:
asyncio.ensure_future(self.heartbeat(ipv4))
elif self.state is State.FOLLOWER and not self.timer:
# The protocol has started but the timer needs to be bootstraped
# Initial factor for the timer is higher (discovery reasons)
self.set_timer(self.candidate, 5)
async def failure_handler(self, instances):
"""
Handle suspicous instances.
"""
failing = [uid for ipv4, uid in instances if uid is not None]
if not failing:
return
for callback in self.handlers[Event.FAILURES]:
asyncio.ensure_future(callback(failing))
async def candidate(self):
"""
Election timer went out, this instance considers itself as a candidate.
"""
cluster_size = len(self.cluster) + 1
# Promote itself as leader if alone
if cluster_size == 1:
await self.promote()
return
# Local variables
self.state = State.CANDIDATE
self.term += 1
self.votes = 1
self.voted_for = self.uid
self.majority = int(math.floor(cluster_size / 2) + 1)
# Init the timer, retry vote if timeout
log.debug("Instance is candidate (requires %d votes)", self.majority)
self.set_timer(self.candidate)
# Start the election
for ipv4 in self.cluster:
asyncio.ensure_future(self.request_vote(ipv4, self.term))
async def promote(self):
"""
Promote this instance to the rank of leader.
"""
log.info("Leader elected of the service '%s'", self.service)
# Local variables
if self.timer:
self.timer.cancel()
self.state = State.LEADER
self.votes = 0
self.voted_for = None
# Use the log to restore ipv4-to-uid mapping
for ipv4, uid in self.log.items():
if self.cluster.get(ipv4):
self.cluster[ipv4] = uid
# Sending heartbeats to the cluster
for ipv4 in self.cluster:
asyncio.ensure_future(self.heartbeat(ipv4))
async def request_vote(self, ipv4, term):
    """
    Request a vote from one instance for the given election term.
    """
    vote = await self.request(ipv4, 'put', {
        'candidate': self.uid, 'term': term
    })
    # Negative or missing feedback does not count.
    if not vote:
        return
    # Ignore the vote if that election is already over.
    if self.term != term or self.loop.time() >= self.timer._when:
        return
    # This instance gave up its candidacy in the meantime.
    if self.state is not State.CANDIDATE:
        return
    # Record the voter's identifier and tally the vote.
    self.cluster[ipv4] = vote['instance']
    self.votes += 1
    # Majority reached: this instance becomes the leader.
    if self.votes >= self.majority:
        await self.promote()
async def heartbeat(self, ipv4):
    """
    Send a heartbeat to reset instance's timer.

    Heartbeats refresh the follower's election timer, replicate the
    log (`self.network`) and collect suspicion reports. The coroutine
    reschedules itself every `self.HEARTBEAT` seconds for as long as
    this instance stays leader and `ipv4` stays in the cluster.
    """
    if (
        # Won't send HB if the instance is not the leader anymore
        self.state is not State.LEADER or
        # Won't send HB if the instance is not in the cluster
        ipv4 not in self.cluster
    ):
        return
    # Schedule the next HB. NOTE(review): the coroutine object is
    # created now but only wrapped by ensure_future after the delay;
    # if the handle were ever cancelled it would never be awaited.
    self.loop.call_later(
        self.HEARTBEAT, asyncio.ensure_future, self.heartbeat(ipv4)
    )
    # Heartbeats allow to refresh follower's timers and to replicate logs
    response = await self.request(ipv4, 'post', {
        'leader': self.uid,
        'log': self.network
    })
    # Empty answer or no response is suspicious
    uid = self.cluster.get(ipv4)
    if not response:
        self.suspicious.add((ipv4, uid))
        return
    # An instance isn't referenced under the same ID anymore
    if uid and uid != response['instance']:
        self.suspicious.add((ipv4, uid))
    self.cluster[ipv4] = response['instance']
    # Collect suspicious instances from heartbeat's response
    self.suspicious.update(
        [tuple(entry) for entry in response['suspicious']]
    )
class DanausSet(set):
    """
    Named after the myth of the daughters of Danaus.

    This set can be filled up but will eventually be emptied (upon
    timeout): ``after`` seconds after the last *new* element arrived,
    the set is cleared and ``callback`` is invoked with a copy of the
    discarded elements.
    """
    def __init__(self, seq=(), *, after=1, callback=None, loop=None):
        super().__init__(seq)
        self.loop = loop or asyncio.get_event_loop()
        self.after = after          # inactivity delay before flushing
        self.callback = callback    # invoked with the discarded elements
        self._timer = None

    def abort(self):
        """Cancel the pending flush, keeping the current content."""
        if self._timer:
            self._timer.cancel()
        self._timer = None

    def add(self, item):
        # Only (re)arm the timer when the content actually changes.
        if item not in self:
            self._schedule()
        super().add(item)

    def update(self, seq):
        if set(seq) - self:
            self._schedule()
        super().update(seq)

    def clear(self):
        super().clear()
        self.abort()

    def _schedule(self):
        # Restart the countdown from scratch.
        self.abort()
        self._timer = self.loop.call_later(self.after, self._empty)

    def _empty(self):
        # Timer fired: hand the content to the callback and reset.
        cleared = self.copy()
        self.clear()
        self._timer = None
        if cleared and self.callback:
            # BUG FIX: the original used asyncio.iscoroutine() on the
            # callback (which tests a coroutine *object*, not a coroutine
            # function) and fell back to asyncio.coroutine(), which was
            # removed in Python 3.11. Handle coroutine functions and
            # plain callables explicitly instead.
            if asyncio.iscoroutinefunction(self.callback):
                asyncio.ensure_future(self.callback(cleared))
            else:
                result = self.callback(cleared)
                if asyncio.iscoroutine(result):
                    asyncio.ensure_future(result)
| |
"""Elliptical geometrical entities.
Contains
* Ellipse
* Circle
"""
from __future__ import division, print_function
from sympy.core import S, pi, sympify
from sympy.core.logic import fuzzy_bool
from sympy.core.numbers import Rational, oo
from sympy.core.compatibility import range
from sympy.core.symbol import Dummy
from sympy.simplify import simplify, trigsimp
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.geometry.exceptions import GeometryError
from sympy.polys import DomainError, Poly, PolynomialError
from sympy.polys.polyutils import _not_a_coeff, _nsort
from sympy.solvers import solve
from sympy.utilities.iterables import uniq
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
from .entity import GeometryEntity, GeometrySet
from .point import Point
from .line import Line, LinearEntity
from .util import _symbol, idiff
import random
class Ellipse(GeometrySet):
"""An elliptical GeometryEntity.
Parameters
==========
center : Point, optional
Default value is Point(0, 0)
hradius : number or SymPy expression, optional
vradius : number or SymPy expression, optional
eccentricity : number or SymPy expression, optional
Two of `hradius`, `vradius` and `eccentricity` must be supplied to
create an Ellipse. The third is derived from the two supplied.
Attributes
==========
center
hradius
vradius
area
circumference
eccentricity
periapsis
apoapsis
focus_distance
foci
Raises
======
GeometryError
When `hradius`, `vradius` and `eccentricity` are incorrectly supplied
as parameters.
TypeError
When `center` is not a Point.
See Also
========
Circle
Notes
-----
Constructed from a center and two radii, the first being the horizontal
radius (along the x-axis) and the second being the vertical radius (along
the y-axis).
When symbolic value for hradius and vradius are used, any calculation that
refers to the foci or the major or minor axis will assume that the ellipse
has its major radius on the x-axis. If this is not true then a manual
rotation is necessary.
Examples
========
>>> from sympy import Ellipse, Point, Rational
>>> e1 = Ellipse(Point(0, 0), 5, 1)
>>> e1.hradius, e1.vradius
(5, 1)
>>> e2 = Ellipse(Point(3, 1), hradius=3, eccentricity=Rational(4, 5))
>>> e2
Ellipse(Point2D(3, 1), 3, 9/5)
Plotting:
>>> from sympy.plotting.pygletplot import PygletPlot as Plot
>>> from sympy import Circle, Segment
>>> c1 = Circle(Point(0,0), 1)
>>> Plot(c1) # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
>>> p = Plot() # doctest: +SKIP
>>> p[0] = c1 # doctest: +SKIP
>>> radius = Segment(c1.center, c1.random_point())
>>> p[1] = radius # doctest: +SKIP
>>> p # doctest: +SKIP
[0]: cos(t), sin(t), 'mode=parametric'
[1]: t*cos(1.546086215036205357975518382),
t*sin(1.546086215036205357975518382), 'mode=parametric'
"""
def __new__(
    cls, center=None, hradius=None, vradius=None, eccentricity=None,
    **kwargs):
    """Build an Ellipse from a center and two of hradius, vradius and
    eccentricity; the missing quantity is derived from the other two.
    Degenerates to a Circle when both radii are equal.
    """
    hradius = sympify(hradius)
    vradius = sympify(vradius)
    eccentricity = sympify(eccentricity)
    if center is None:
        center = Point(0, 0)
    else:
        center = Point(center)
    if len(center) != 2:
        raise ValueError('The center of "{0}" must be a two dimensional point'.format(cls))
    # Exactly two of the three defining quantities must be supplied.
    # NOTE(review): filter(None, ...) also drops zero-valued radii --
    # confirm whether a zero radius should count as "supplied".
    if len(list(filter(None, (hradius, vradius, eccentricity)))) != 2:
        # BUG FIX: the message previously ended with a stray double
        # quote ('... None."').
        raise ValueError('Exactly two arguments of "hradius", '
            '"vradius", and "eccentricity" must not be None.')
    if eccentricity is not None:
        # b = a*sqrt(1 - e**2) relates the two radii via eccentricity.
        if hradius is None:
            hradius = vradius / sqrt(1 - eccentricity**2)
        elif vradius is None:
            vradius = hradius * sqrt(1 - eccentricity**2)
    # Equal radii: return a Circle instead of an Ellipse.
    if hradius == vradius:
        return Circle(center, hradius, **kwargs)
    return GeometryEntity.__new__(cls, center, hradius, vradius, **kwargs)
@property
def ambient_dimension(self):
    # An Ellipse always lives in the 2D plane.
    return 2

@property
def center(self):
    """The center of the ellipse.
    Returns
    =======
    center : Point
    See Also
    ========
    sympy.geometry.point.Point
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.center
    Point2D(0, 0)
    """
    # args layout is (center, hradius, vradius) -- see __new__.
    return self.args[0]

@property
def hradius(self):
    """The horizontal radius of the ellipse.
    Returns
    =======
    hradius : number
    See Also
    ========
    vradius, major, minor
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.hradius
    3
    """
    return self.args[1]

@property
def vradius(self):
    """The vertical radius of the ellipse.
    Returns
    =======
    vradius : number
    See Also
    ========
    hradius, major, minor
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.vradius
    1
    """
    return self.args[2]
@property
def minor(self):
    """Shorter axis of the ellipse (if it can be determined) else vradius.
    Returns
    =======
    minor : number or expression
    See Also
    ========
    hradius, vradius, major
    Examples
    ========
    >>> from sympy import Point, Ellipse, Symbol
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.minor
    1
    >>> a = Symbol('a')
    >>> b = Symbol('b')
    >>> Ellipse(p1, a, b).minor
    b
    >>> Ellipse(p1, b, a).minor
    a
    >>> m = Symbol('m')
    >>> M = m + 1
    >>> Ellipse(p1, m, M).minor
    m
    """
    # A Circle stores only (center, radius), so this slice may hold a
    # single element; an Ellipse yields (hradius, vradius).
    ab = self.args[1:3]
    if len(ab) == 1:
        return ab[0]
    a, b = ab
    # For symbolic radii the comparison may stay unevaluated, hence the
    # explicit `== True` / `== False` checks instead of truthiness.
    o = a - b < 0
    if o == True:
        return a
    elif o == False:
        return b
    # Ordering undecidable: fall back to the vertical radius.
    return self.vradius

@property
def major(self):
    """Longer axis of the ellipse (if it can be determined) else hradius.
    Returns
    =======
    major : number or expression
    See Also
    ========
    hradius, vradius, minor
    Examples
    ========
    >>> from sympy import Point, Ellipse, Symbol
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.major
    3
    >>> a = Symbol('a')
    >>> b = Symbol('b')
    >>> Ellipse(p1, a, b).major
    a
    >>> Ellipse(p1, b, a).major
    b
    >>> m = Symbol('m')
    >>> M = m + 1
    >>> Ellipse(p1, m, M).major
    m + 1
    """
    # See `minor` for the reasoning behind the slice and comparisons.
    ab = self.args[1:3]
    if len(ab) == 1:
        return ab[0]
    a, b = ab
    o = b - a < 0
    if o == True:
        return a
    elif o == False:
        return b
    # Ordering undecidable: fall back to the horizontal radius.
    return self.hradius
@property
def area(self):
    """The area of the ellipse.
    Returns
    =======
    area : number
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.area
    3*pi
    """
    # pi * a * b
    return simplify(S.Pi * self.hradius * self.vradius)
@property
def circumference(self):
    """The circumference of the ellipse.
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.circumference
    12*Integral(sqrt((-8*_x**2/9 + 1)/(-_x**2 + 1)), (_x, 0, 1))
    """
    from sympy import Integral
    if self.eccentricity == 1:
        # BUG FIX: an eccentricity of 1 is the *degenerate* ellipse
        # (zero minor axis), whose perimeter is twice up and down the
        # major axis -- not the circle formula the original returned.
        return 4*self.major
    elif self.eccentricity == 0:
        # circle
        return 2*pi*self.hradius
    else:
        x = Dummy('x', real=True)
        # 4a * E(e): complete elliptic integral of the second kind,
        # left unevaluated.
        return 4*self.major*Integral(
            sqrt((1 - (self.eccentricity*x)**2)/(1 - x**2)), (x, 0, 1))
@property
def eccentricity(self):
    """The eccentricity of the ellipse.
    Returns
    =======
    eccentricity : number
    Examples
    ========
    >>> from sympy import Point, Ellipse, sqrt
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, sqrt(2))
    >>> e1.eccentricity
    sqrt(7)/3
    """
    # e = c / a with c the focal distance and a the semi-major axis.
    return self.focus_distance / self.major

@property
def periapsis(self):
    """The periapsis of the ellipse.
    The shortest distance between the focus and the contour.
    Returns
    =======
    periapsis : number
    See Also
    ========
    apoapsis : Returns greatest distance between focus and contour
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.periapsis
    -2*sqrt(2) + 3
    """
    # a*(1 - e): distance from a focus to the nearest vertex.
    return self.major * (1 - self.eccentricity)

@property
def apoapsis(self):
    """The apoapsis of the ellipse.
    The greatest distance between the focus and the contour.
    Returns
    =======
    apoapsis : number
    See Also
    ========
    periapsis : Returns shortest distance between foci and contour
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.apoapsis
    2*sqrt(2) + 3
    """
    # a*(1 + e): distance from a focus to the farthest vertex.
    return self.major * (1 + self.eccentricity)

@property
def focus_distance(self):
    """The focal distance of the ellipse.
    The distance between the center and one focus.
    Returns
    =======
    focus_distance : number
    See Also
    ========
    foci
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.focus_distance
    2*sqrt(2)
    """
    return Point.distance(self.center, self.foci[0])

@property
def foci(self):
    """The foci of the ellipse.
    Notes
    -----
    The foci can only be calculated if the major/minor axes are known.
    Raises
    ======
    ValueError
        When the major and minor axis cannot be determined.
    See Also
    ========
    sympy.geometry.point.Point
    focus_distance : Returns the distance between focus and center
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> p1 = Point(0, 0)
    >>> e1 = Ellipse(p1, 3, 1)
    >>> e1.foci
    (Point2D(-2*sqrt(2), 0), Point2D(2*sqrt(2), 0))
    """
    c = self.center
    hr, vr = self.hradius, self.vradius
    if hr == vr:
        # A circle: both foci coincide with the center.
        return (c, c)
    # calculate focus distance manually, since focus_distance calls this
    # routine
    fd = sqrt(self.major**2 - self.minor**2)
    if hr == self.minor:
        # foci on the y-axis
        return (c + Point(0, -fd), c + Point(0, fd))
    elif hr == self.major:
        # foci on the x-axis
        return (c + Point(-fd, 0), c + Point(fd, 0))
    # NOTE(review): symbolic radii whose ordering is undecidable fall
    # through and return None; the documented ValueError is not raised.
@property
def bounds(self):
    """Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
    rectangle for the geometric figure.
    """
    h, v = self.hradius, self.vradius
    return (self.center.x - h, self.center.y - v, self.center.x + h, self.center.y + v)

def rotate(self, angle=0, pt=None):
    """Rotate ``angle`` radians counterclockwise about Point ``pt``.
    Note: since the general ellipse is not supported, only rotations that
    are integer multiples of pi/2 are allowed.
    Examples
    ========
    >>> from sympy import Ellipse, pi
    >>> Ellipse((1, 0), 2, 1).rotate(pi/2)
    Ellipse(Point2D(0, 1), 1, 2)
    >>> Ellipse((1, 0), 2, 1).rotate(pi)
    Ellipse(Point2D(-1, 0), 2, 1)
    """
    if self.hradius == self.vradius:
        # A circle is rotation-invariant about its own center.
        return self.func(*self.args)
    if (angle/S.Pi).is_integer:
        # Multiples of pi keep the axes aligned; only the center moves.
        return super(Ellipse, self).rotate(angle, pt)
    if (2*angle/S.Pi).is_integer:
        # Odd multiples of pi/2 swap the horizontal and vertical radii.
        return self.func(self.center.rotate(angle, pt), self.vradius, self.hradius)
    # XXX see https://github.com/sympy/sympy/issues/2815 for general ellipses
    raise NotImplementedError('Only rotations of pi/2 are currently supported for Ellipse.')

def scale(self, x=1, y=1, pt=None):
    """Override GeometryEntity.scale since it is the major and minor
    axes which must be scaled and they are not GeometryEntities.
    Examples
    ========
    >>> from sympy import Ellipse
    >>> Ellipse((0, 0), 2, 1).scale(2, 4)
    Circle(Point2D(0, 0), 4)
    >>> Ellipse((0, 0), 2, 1).scale(2)
    Ellipse(Point2D(0, 0), 4, 1)
    """
    c = self.center
    if pt:
        # Scale about an arbitrary point: shift it to the origin first.
        pt = Point(pt)
        return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
    h = self.hradius
    v = self.vradius
    return self.func(c.scale(x, y), hradius=h*x, vradius=v*y)
def reflect(self, line):
    """Override GeometryEntity.reflect since the radius
    is not a GeometryEntity.
    Examples
    ========
    >>> from sympy import Circle, Line
    >>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
    Circle(Point2D(1, 0), -1)
    >>> from sympy import Ellipse, Line, Point
    >>> Ellipse(Point(3, 4), 1, 3).reflect(Line(Point(0, -4), Point(5, 0)))
    Traceback (most recent call last):
    ...
    NotImplementedError:
    General Ellipse is not supported but the equation of the reflected
    Ellipse is given by the zeros of: f(x, y) = (9*x/41 + 40*y/41 +
    37/41)**2 + (40*x/123 - 3*y/41 - 364/123)**2 - 1
    Notes
    =====
    Until the general ellipse (with no axis parallel to the x-axis) is
    supported a NotImplemented error is raised and the equation whose
    zeros define the rotated ellipse is given.
    """
    from .util import _uniquely_named_symbol
    if line.slope in (0, oo):
        # Horizontal/vertical mirror lines keep the axes aligned, so
        # only the center needs to be reflected.
        c = self.center
        c = c.reflect(line)
        return self.func(c, -self.hradius, self.vradius)
    else:
        # General mirror line: derive the implicit equation of the
        # reflected ellipse by substituting reflected coordinates.
        x, y = [_uniquely_named_symbol(name, self, line) for name in 'xy']
        expr = self.equation(x, y)
        p = Point(x, y).reflect(line)
        result = expr.subs(zip((x, y), p.args
            ), simultaneous=True)
        raise NotImplementedError(filldedent(
            'General Ellipse is not supported but the equation '
            'of the reflected Ellipse is given by the zeros of: ' +
            "f(%s, %s) = %s" % (str(x), str(y), str(result))))

def encloses_point(self, p):
    """
    Return True if p is enclosed by (is inside of) self.
    Notes
    -----
    Being on the border of self is considered False.
    Parameters
    ==========
    p : Point
    Returns
    =======
    encloses_point : True, False or None
    See Also
    ========
    sympy.geometry.point.Point
    Examples
    ========
    >>> from sympy import Ellipse, S
    >>> from sympy.abc import t
    >>> e = Ellipse((0, 0), 3, 2)
    >>> e.encloses_point((0, 0))
    True
    >>> e.encloses_point(e.arbitrary_point(t).subs(t, S.Half))
    False
    >>> e.encloses_point((4, 0))
    False
    """
    p = Point(p)
    # On the border counts as not enclosed.
    if p in self:
        return False
    if len(self.foci) == 2:
        # if the combined distance from the foci to p (h1 + h2) is less
        # than the combined distance from the foci to the minor axis
        # (which is the same as the major axis length) then p is inside
        # the ellipse
        h1, h2 = [f.distance(p) for f in self.foci]
        test = 2*self.major - (h1 + h2)
    else:
        # Circle case: compare against the distance from the center.
        test = self.radius - self.center.distance(p)
    return fuzzy_bool(test.is_positive)
@doctest_depends_on(modules=('pyglet',))
def tangent_lines(self, p):
    """Tangent lines between `p` and the ellipse.
    If `p` is on the ellipse, returns the tangent line through point `p`.
    Otherwise, returns the tangent line(s) from `p` to the ellipse, or
    None if no tangent line is possible (e.g., `p` inside ellipse).
    Parameters
    ==========
    p : Point
    Returns
    =======
    tangent_lines : list with 1 or 2 Lines
    Raises
    ======
    NotImplementedError
        Can only find tangent lines for a point, `p`, on the ellipse.
    See Also
    ========
    sympy.geometry.point.Point, sympy.geometry.line.Line
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.tangent_lines(Point(3, 0))
    [Line(Point2D(3, 0), Point2D(3, -12))]
    >>> # This will plot an ellipse together with a tangent line.
    >>> from sympy.plotting.pygletplot import PygletPlot as Plot
    >>> from sympy import Point, Ellipse
    >>> e = Ellipse(Point(0,0), 3, 2)
    >>> t = e.tangent_lines(e.random_point())
    >>> p = Plot()
    >>> p[0] = e # doctest: +SKIP
    >>> p[1] = t # doctest: +SKIP
    """
    p = Point(p)
    if self.encloses_point(p):
        return []
    if p in self:
        # p lies on the boundary: the tangent direction is
        # perpendicular to the gradient of the implicit equation.
        delta = self.center - p
        rise = (self.vradius ** 2)*delta.x
        run = -(self.hradius ** 2)*delta.y
        p2 = Point(simplify(p.x + run),
                   simplify(p.y + rise))
        return [Line(p, p2)]
    else:
        if len(self.foci) == 2:
            f1, f2 = self.foci
            # BUG FIX: this inside-test needs the semi-MAJOR axis; the
            # original used self.hradius, which is wrong whenever the
            # major axis is vertical (vradius > hradius).
            maj = self.major
            test = (2*maj -
                    Point.distance(f1, p) -
                    Point.distance(f2, p))
        else:
            test = self.radius - Point.distance(self.center, p)
        if test.is_number and test.is_positive:
            return []
        # else p is outside the ellipse or we can't tell. In case of the
        # latter, the solutions returned will only be valid if
        # the point is not inside the ellipse; if it is, nan will result.
        x, y = Dummy('x'), Dummy('y')
        eq = self.equation(x, y)
        dydx = idiff(eq, y, x)
        slope = Line(p, Point(x, y)).slope
        tangent_points = solve([slope - dydx, eq], [x, y])
        # handle horizontal and vertical tangent lines
        if len(tangent_points) == 1:
            assert tangent_points[0][
                0] == p.x or tangent_points[0][1] == p.y
            return [Line(p, p + Point(1, 0)), Line(p, p + Point(0, 1))]
        # others
        return [Line(p, tangent_points[0]), Line(p, tangent_points[1])]
def is_tangent(self, o):
    """Is `o` tangent to the ellipse?
    Parameters
    ==========
    o : GeometryEntity
        An Ellipse, LinearEntity or Polygon
    Raises
    ======
    NotImplementedError
        When the wrong type of argument is supplied.
    Returns
    =======
    is_tangent: boolean
        True if o is tangent to the ellipse, False otherwise.
    See Also
    ========
    tangent_lines
    Examples
    ========
    >>> from sympy import Point, Ellipse, Line
    >>> p0, p1, p2 = Point(0, 0), Point(3, 0), Point(3, 3)
    >>> e1 = Ellipse(p0, 3, 2)
    >>> l1 = Line(p1, p2)
    >>> e1.is_tangent(l1)
    True
    """
    inter = None
    if isinstance(o, Ellipse):
        inter = self.intersection(o)
        if isinstance(inter, Ellipse):
            # Identical ellipses overlap everywhere: not tangent.
            return False
        return (inter is not None and len(inter) == 1
                and isinstance(inter[0], Point))
    elif isinstance(o, LinearEntity):
        # Tangency means exactly one intersection point on the entity.
        inter = self._do_line_intersection(o)
        if inter is not None and len(inter) == 1:
            return inter[0] in o
        else:
            return False
    else:
        # BUG FIX: Polygon is not imported at module level (that would
        # be a circular import), so import it locally before use.
        from .polygon import Polygon
        if isinstance(o, Polygon):
            # Tangent iff exactly one side touches in a single point.
            c = 0
            for seg in o.sides:
                inter = self._do_line_intersection(seg)
                c += len([True for point in inter if point in seg])
            return c == 1
        raise NotImplementedError("Unknown argument type")
def normal_lines(self, p, prec=None):
    """Normal lines between `p` and the ellipse.
    Parameters
    ==========
    p : Point
    prec : int, optional
        If given, numerically evaluate the normal points and slopes
        to this many digits instead of returning exact expressions.
    Returns
    =======
    normal_lines : list with 1, 2 or 4 Lines
    Examples
    ========
    >>> from sympy import Line, Point, Ellipse
    >>> e = Ellipse((0, 0), 2, 3)
    >>> c = e.center
    >>> e.normal_lines(c + Point(1, 0))
    [Line(Point2D(0, 0), Point2D(1, 0))]
    >>> e.normal_lines(c)
    [Line(Point2D(0, 0), Point2D(0, 1)), Line(Point2D(0, 0), Point2D(1, 0))]
    Off-axis points require the solution of a quartic equation. This
    often leads to very large expressions that may be of little practical
    use. An approximate solution of `prec` digits can be obtained by
    passing in the desired value:
    >>> e.normal_lines((3, 3), prec=2)
    [Line(Point2D(-38/47, -85/31), Point2D(9/47, -21/17)),
    Line(Point2D(19/13, -43/21), Point2D(32/13, -8/3))]
    Whereas the above solution has an operation count of 12, the exact
    solution has an operation count of 2020.
    """
    p = Point(p)
    # XXX change True to something like self.angle == 0 if the arbitrarily
    # rotated ellipse is introduced.
    # https://github.com/sympy/sympy/issues/2815)
    if True:
        rv = []
        if p.x == self.center.x:
            rv.append(Line(self.center, slope=oo))
        if p.y == self.center.y:
            rv.append(Line(self.center, slope=0))
        if rv:
            # at these special orientations of p either 1 or 2 normals
            # exist and we are done
            return rv
        # find the 4 normal points and construct lines through them with
        # the corresponding slope
        x, y = Dummy('x', real=True), Dummy('y', real=True)
        eq = self.equation(x, y)
        dydx = idiff(eq, y, x)
        # the normal slope is the negative reciprocal of the tangent slope
        norm = -1/dydx
        slope = Line(p, (x, y)).slope
        seq = slope - norm
        yis = solve(seq, y)[0]
        xeq = eq.subs(y, yis).as_numer_denom()[0].expand()
        if len(xeq.free_symbols) == 1:
            try:
                # this is so much faster, it's worth a try
                xsol = Poly(xeq, x).real_roots()
            except (DomainError, PolynomialError, NotImplementedError):
                xsol = _nsort(solve(xeq, x), separated=True)[0]
            points = [Point(i, solve(eq.subs(x, i), y)[0]) for i in xsol]
        else:
            raise NotImplementedError(
                'intersections for the general ellipse are not supported')
        slopes = [norm.subs(zip((x, y), pt.args)) for pt in points]
        if prec is not None:
            points = [pt.n(prec) for pt in points]
            slopes = [i if _not_a_coeff(i) else i.n(prec) for i in slopes]
        return [Line(pt, slope=s) for pt, s in zip(points, slopes)]

def arbitrary_point(self, parameter='t'):
    """A parameterized point on the ellipse.
    Parameters
    ==========
    parameter : str, optional
        Default value is 't'.
    Returns
    =======
    arbitrary_point : Point
    Raises
    ======
    ValueError
        When `parameter` already appears in the functions.
    See Also
    ========
    sympy.geometry.point.Point
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.arbitrary_point()
    Point2D(3*cos(t), 2*sin(t))
    """
    t = _symbol(parameter)
    if t.name in (f.name for f in self.free_symbols):
        raise ValueError(filldedent('Symbol %s already appears in object '
            'and cannot be used as a parameter.' % t.name))
    # Standard parametrization (a*cos(t), b*sin(t)) about the center.
    return Point(self.center.x + self.hradius*cos(t),
                 self.center.y + self.vradius*sin(t))

def plot_interval(self, parameter='t'):
    """The plot interval for the default geometric plot of the Ellipse.
    Parameters
    ==========
    parameter : str, optional
        Default value is 't'.
    Returns
    =======
    plot_interval : list
        [parameter, lower_bound, upper_bound]
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.plot_interval()
    [t, -pi, pi]
    """
    t = _symbol(parameter)
    return [t, -S.Pi, S.Pi]
def random_point(self, seed=None):
    """A random point on the ellipse.
    Returns
    =======
    point : Point
    See Also
    ========
    sympy.geometry.point.Point
    arbitrary_point : Returns parameterized point on ellipse
    Notes
    -----
    A random point may not appear to be on the ellipse, ie, `p in e` may
    return False. This is because the coordinates of the point will be
    floating point values, and when these values are substituted into the
    equation for the ellipse the result may not be zero because of floating
    point rounding error.
    Examples
    ========
    >>> from sympy import Point, Ellipse, Segment
    >>> e1 = Ellipse(Point(0, 0), 3, 2)
    >>> e1.random_point() # gives some random point
    Point2D(...)
    >>> p1 = e1.random_point(seed=0); p1.n(2)
    Point2D(2.1, 1.4)
    The random_point method assures that the point will test as being
    in the ellipse:
    >>> p1 in e1
    True
    Notes
    =====
    An arbitrary_point with a random value of t substituted into it may
    not test as being on the ellipse because the expression tested that
    a point is on the ellipse doesn't simplify to zero and doesn't evaluate
    exactly to zero:
    >>> from sympy.abc import t
    >>> e1.arbitrary_point(t)
    Point2D(3*cos(t), 2*sin(t))
    >>> p2 = _.subs(t, 0.1)
    >>> p2 in e1
    False
    Note that arbitrary_point routine does not take this approach. A value
    for cos(t) and sin(t) (not t) is substituted into the arbitrary point.
    There is a small chance that this will give a point that will not
    test as being in the ellipse, so the process is repeated (up to 10
    times) until a valid point is obtained.
    """
    from sympy import sin, cos, Rational
    t = _symbol('t')
    x, y = self.arbitrary_point(t).args
    # get a random value in [-1, 1) corresponding to cos(t)
    # and confirm that it will test as being in the ellipse
    if seed is not None:
        rng = random.Random(seed)
    else:
        rng = random
    for i in range(10):  # should be enough?
        # simplify this now or else the Float will turn s into a Float
        c = 2*Rational(rng.random()) - 1
        s = sqrt(1 - c**2)
        p1 = Point(x.subs(cos(t), c), y.subs(sin(t), s))
        if p1 in self:
            return p1
    raise GeometryError(
        'Having problems generating a point in the ellipse.')

def equation(self, x='x', y='y'):
    """The equation of the ellipse.
    Parameters
    ==========
    x : str, optional
        Label for the x-axis. Default value is 'x'.
    y : str, optional
        Label for the y-axis. Default value is 'y'.
    Returns
    =======
    equation : sympy expression
    See Also
    ========
    arbitrary_point : Returns parameterized point on ellipse
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(1, 0), 3, 2)
    >>> e1.equation()
    y**2/4 + (x/3 - 1/3)**2 - 1
    """
    x = _symbol(x)
    y = _symbol(y)
    # ((x - cx)/a)**2 + ((y - cy)/b)**2 - 1, zero on the boundary.
    t1 = ((x - self.center.x) / self.hradius)**2
    t2 = ((y - self.center.y) / self.vradius)**2
    return t1 + t2 - 1
def _do_line_intersection(self, o):
    """
    Find the intersection of a LinearEntity and the ellipse.
    All LinearEntities are treated as a line and filtered at
    the end to see that they lie in o.
    """
    # Substituting the line's parametric form into the ellipse equation
    # yields a quadratic a*t**2 + 2*b*t + c = 0 in the line parameter t.
    hr_sq = self.hradius ** 2
    vr_sq = self.vradius ** 2
    lp = o.points
    ldir = lp[1] - lp[0]
    diff = lp[0] - self.center
    mdir = Point(ldir.x/hr_sq, ldir.y/vr_sq)
    mdiff = Point(diff.x/hr_sq, diff.y/vr_sq)
    a = ldir.dot(mdir)
    b = ldir.dot(mdiff)
    c = diff.dot(mdiff) - 1
    det = simplify(b*b - a*c)
    result = []
    if det == 0:
        # Tangent line: a single (double) root.
        t = -b / a
        result.append(lp[0] + (lp[1] - lp[0]) * t)
    # Definite and potential symbolic intersections are allowed.
    elif (det > 0) != False:
        root = sqrt(det)
        t_a = (-b - root) / a
        t_b = (-b + root) / a
        result.append(lp[0] + (lp[1] - lp[0]) * t_a)
        result.append(lp[0] + (lp[1] - lp[0]) * t_b)
    # Keep only the points that actually lie on the segment/ray `o`.
    return [r for r in result if r in o]

def _do_ellipse_intersection(self, o):
    """The intersection of an ellipse with another ellipse or a circle.
    Private helper method for `intersection`.
    """
    x = Dummy('x', real=True)
    y = Dummy('y', real=True)
    # Solve the two implicit equations simultaneously.
    seq = self.equation(x, y)
    oeq = o.equation(x, y)
    result = solve([seq, oeq], [x, y])
    return [Point(*r) for r in list(uniq(result))]

def intersection(self, o):
    """The intersection of this ellipse and another geometrical entity
    `o`.
    Parameters
    ==========
    o : GeometryEntity
    Returns
    =======
    intersection : list of GeometryEntity objects
    Notes
    -----
    Currently supports intersections with Point, Line, Segment, Ray,
    Circle and Ellipse types.
    See Also
    ========
    sympy.geometry.entity.GeometryEntity
    Examples
    ========
    >>> from sympy import Ellipse, Point, Line, sqrt
    >>> e = Ellipse(Point(0, 0), 5, 7)
    >>> e.intersection(Point(0, 0))
    []
    >>> e.intersection(Point(5, 0))
    [Point2D(5, 0)]
    >>> e.intersection(Line(Point(0,0), Point(0, 1)))
    [Point2D(0, -7), Point2D(0, 7)]
    >>> e.intersection(Line(Point(5,0), Point(5, 1)))
    [Point2D(5, 0)]
    >>> e.intersection(Line(Point(6,0), Point(6, 1)))
    []
    >>> e = Ellipse(Point(-1, 0), 4, 3)
    >>> e.intersection(Ellipse(Point(1, 0), 4, 3))
    [Point2D(0, -3*sqrt(15)/4), Point2D(0, 3*sqrt(15)/4)]
    >>> e.intersection(Ellipse(Point(5, 0), 4, 3))
    [Point2D(2, -3*sqrt(7)/4), Point2D(2, 3*sqrt(7)/4)]
    >>> e.intersection(Ellipse(Point(100500, 0), 4, 3))
    []
    >>> e.intersection(Ellipse(Point(0, 0), 3, 4))
    [Point2D(-363/175, -48*sqrt(111)/175), Point2D(-363/175, 48*sqrt(111)/175), Point2D(3, 0)]
    >>> e.intersection(Ellipse(Point(-1, 0), 3, 4))
    [Point2D(-17/5, -12/5), Point2D(-17/5, 12/5), Point2D(7/5, -12/5), Point2D(7/5, 12/5)]
    """
    if isinstance(o, Point):
        if o in self:
            return [o]
        else:
            return []
    elif isinstance(o, LinearEntity):
        # LinearEntity may be a ray/segment, so check the points
        # of intersection for coincidence first
        return self._do_line_intersection(o)
    elif isinstance(o, Circle):
        return self._do_ellipse_intersection(o)
    elif isinstance(o, Ellipse):
        if o == self:
            return self
        else:
            return self._do_ellipse_intersection(o)
    # Fall back to the other entity's own intersection logic.
    return o.intersection(self)
def evolute(self, x='x', y='y'):
    """The equation of evolute of the ellipse.
    Parameters
    ==========
    x : str, optional
        Label for the x-axis. Default value is 'x'.
    y : str, optional
        Label for the y-axis. Default value is 'y'.
    Returns
    =======
    equation : sympy expression
    Examples
    ========
    >>> from sympy import Point, Ellipse
    >>> e1 = Ellipse(Point(1, 0), 3, 2)
    >>> e1.evolute()
    2**(2/3)*y**(2/3) + (3*x - 3)**(2/3) - 5**(2/3)
    """
    # A Circle stores only 2 args; its evolute is not handled here.
    if len(self.args) != 3:
        raise NotImplementedError('Evolute of arbitrary Ellipse is not supported.')
    x = _symbol(x)
    y = _symbol(y)
    # Astroid-shaped evolute: (a*X)^(2/3) + (b*Y)^(2/3) = (a^2-b^2)^(2/3)
    t1 = (self.hradius*(x - self.center.x))**Rational(2, 3)
    t2 = (self.vradius*(y - self.center.y))**Rational(2, 3)
    return t1 + t2 - (self.hradius**2 - self.vradius**2)**Rational(2, 3)
def __eq__(self, o):
    """Is the other GeometryEntity an Ellipse with the same geometry?

    BUG FIX: the original accepted any GeometryEntity and then read
    o.center/o.hradius/o.vradius, raising AttributeError for entities
    such as Point; non-Ellipse entities now simply compare unequal.
    """
    return isinstance(o, Ellipse) and (self.center == o.center and
                                       self.hradius == o.hradius and
                                       self.vradius == o.vradius)
def __hash__(self):
    # __eq__ is customized, so hashing must be re-exposed explicitly.
    return super(Ellipse, self).__hash__()

def __contains__(self, o):
    # Membership means "on the boundary", not "inside" (see
    # encloses_point for the interior test).
    if isinstance(o, Point):
        x = Dummy('x', real=True)
        y = Dummy('y', real=True)
        res = self.equation(x, y).subs({x: o.x, y: o.y})
        # `is S.Zero` relies on SymPy's singleton caching of Integer(0).
        return trigsimp(simplify(res)) is S.Zero
    elif isinstance(o, Ellipse):
        return self == o
    return False

def _svg(self, scale_factor=1., fill_color="#66cc99"):
    """Returns SVG ellipse element for the Ellipse.
    Parameters
    ==========
    scale_factor : float
        Multiplication factor for the SVG stroke-width. Default is 1.
    fill_color : str, optional
        Hex string for fill color. Default is "#66cc99".
    """
    from sympy.core.evalf import N
    # Numerically evaluate so the SVG attributes are plain numbers.
    c = N(self.center)
    h, v = N(self.hradius), N(self.vradius)
    return (
        '<ellipse fill="{1}" stroke="#555555" '
        'stroke-width="{0}" opacity="0.6" cx="{2}" cy="{3}" rx="{4}" ry="{5}"/>'
    ).format(2. * scale_factor, fill_color, c.x, c.y, h, v)
class Circle(Ellipse):
"""A circle in space.
Constructed simply from a center and a radius, or from three
non-collinear points.
Parameters
==========
center : Point
radius : number or sympy expression
points : sequence of three Points
Attributes
==========
radius (synonymous with hradius, vradius, major and minor)
circumference
equation
Raises
======
GeometryError
When trying to construct circle from three collinear points.
When trying to construct circle from incorrect parameters.
See Also
========
Ellipse, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Circle
>>> # a circle constructed from a center and radius
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.hradius, c1.vradius, c1.radius
(5, 5, 5)
>>> # a circle constructed from three points
>>> c2 = Circle(Point(0, 0), Point(1, 1), Point(1, 0))
>>> c2.hradius, c2.vradius, c2.radius, c2.center
(sqrt(2)/2, sqrt(2)/2, sqrt(2)/2, Point2D(1/2, 1/2))
"""
def __new__(cls, *args, **kwargs):
    """Build a Circle from (center, radius) or from three Points."""
    c, r = None, None
    if len(args) == 3:
        # Three points: the circle is the circumcircle of the triangle
        # they form (undefined when the points are collinear).
        args = [Point(a) for a in args]
        if Point.is_collinear(*args):
            raise GeometryError(
                "Cannot construct a circle from three collinear points")
        # Imported locally to avoid a circular module-level import.
        from .polygon import Triangle
        t = Triangle(*args)
        c = t.circumcenter
        r = t.circumradius
    elif len(args) == 2:
        # Assume (center, radius) pair
        c = Point(args[0])
        r = sympify(args[1])
    if not (c is None or r is None):
        return GeometryEntity.__new__(cls, c, r, **kwargs)
    raise GeometryError("Circle.__new__ received unknown arguments")
@property
def radius(self):
    """The radius of the circle.
    Returns
    =======
    radius : number or sympy expression
    See Also
    ========
    Ellipse.major, Ellipse.minor, Ellipse.hradius, Ellipse.vradius
    Examples
    ========
    >>> from sympy import Point, Circle
    >>> c1 = Circle(Point(3, 4), 6)
    >>> c1.radius
    6
    """
    # args layout is (center, radius) -- see __new__.
    return self.args[1]

@property
def vradius(self):
    """
    This Ellipse property is an alias for the Circle's radius.
    Whereas hradius, major and minor can use Ellipse's conventions,
    the vradius does not exist for a circle. It is always a positive
    value in order that the Circle, like Polygons, will have an
    area that can be positive or negative as determined by the sign
    of the hradius.
    Examples
    ========
    >>> from sympy import Point, Circle
    >>> c1 = Circle(Point(3, 4), 6)
    >>> c1.vradius
    6
    """
    return abs(self.radius)

@property
def circumference(self):
    """The circumference of the circle.
    Returns
    =======
    circumference : number or SymPy expression
    Examples
    ========
    >>> from sympy import Point, Circle
    >>> c1 = Circle(Point(3, 4), 6)
    >>> c1.circumference
    12*pi
    """
    # 2 * pi * r
    return 2 * S.Pi * self.radius
def equation(self, x='x', y='y'):
"""The equation of the circle.
Parameters
==========
x : str or Symbol, optional
Default value is 'x'.
y : str or Symbol, optional
Default value is 'y'.
Returns
=======
equation : SymPy expression
Examples
========
>>> from sympy import Point, Circle
>>> c1 = Circle(Point(0, 0), 5)
>>> c1.equation()
x**2 + y**2 - 25
"""
x = _symbol(x)
y = _symbol(y)
t1 = (x - self.center.x)**2
t2 = (y - self.center.y)**2
return t1 + t2 - self.major**2
    def intersection(self, o):
        """The intersection of this circle with another geometrical entity.

        Parameters
        ==========

        o : GeometryEntity

        Returns
        =======

        intersection : list of GeometryEntities

        Examples
        ========

        >>> from sympy import Point, Circle, Line, Ray
        >>> p1, p2, p3 = Point(0, 0), Point(5, 5), Point(6, 0)
        >>> p4 = Point(5, 0)
        >>> c1 = Circle(p1, 5)
        >>> c1.intersection(p2)
        []
        >>> c1.intersection(p4)
        [Point2D(5, 0)]
        >>> c1.intersection(Ray(p1, p2))
        [Point2D(5*sqrt(2)/2, 5*sqrt(2)/2)]
        >>> c1.intersection(Line(p2, p3))
        []
        """
        if isinstance(o, Circle):
            if o.center == self.center:
                if o.radius == self.radius:
                    # Coincident circles.
                    # NOTE(review): this returns the circle itself rather
                    # than a list, contrary to the documented return type --
                    # confirm callers handle this before changing it.
                    return o
                # Concentric circles with different radii never intersect.
                return []
            # General circle-circle intersection: work along the line joining
            # the two centers, at distance d apart.
            dx, dy = (o.center - self.center).args
            d = sqrt(simplify(dy**2 + dx**2))
            R = o.radius + self.radius
            # No intersection when the circles are separate (d > r1 + r2) or
            # one lies strictly inside the other (d < |r1 - r2|).
            if d > R or d < abs(self.radius - o.radius):
                return []
            # a: distance from self.center to the chord (radical line),
            # measured along the center line.
            a = simplify((self.radius**2 - o.radius**2 + d**2) / (2*d))
            # (x2, y2): foot of the chord on the center line.
            x2 = self.center.x + (dx * a/d)
            y2 = self.center.y + (dy * a/d)
            # h: half-length of the chord.
            h = sqrt(simplify(self.radius**2 - a**2))
            # Offset of the intersection points perpendicular to the
            # center line.
            rx = -dy * (h/d)
            ry = dx * (h/d)
            xi_1 = simplify(x2 + rx)
            xi_2 = simplify(x2 - rx)
            yi_1 = simplify(y2 + ry)
            yi_2 = simplify(y2 - ry)
            ret = [Point(xi_1, yi_1)]
            # Tangent circles (h == 0) yield a single point; avoid
            # duplicating it.
            if xi_1 != xi_2 or yi_1 != yi_2:
                ret.append(Point(xi_2, yi_2))
            return ret
        # Anything that is not a circle is handled by the general
        # Ellipse implementation.
        return Ellipse.intersection(self, o)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle
>>> Circle((0, 0), 1).scale(2, 2)
Circle(Point2D(0, 0), 2)
>>> Circle((0, 0), 1).scale(2, 4)
Ellipse(Point2D(0, 0), 2, 4)
"""
c = self.center
if pt:
pt = Point(pt)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
c = c.scale(x, y)
x, y = [abs(i) for i in (x, y)]
if x == y:
return self.func(c, x*self.radius)
h = v = self.radius
return Ellipse(c, hradius=h*x, vradius=v*y)
def reflect(self, line):
"""Override GeometryEntity.reflect since the radius
is not a GeometryEntity.
Examples
========
>>> from sympy import Circle, Line
>>> Circle((0, 1), 1).reflect(Line((0, 0), (1, 1)))
Circle(Point2D(1, 0), -1)
"""
c = self.center
c = c.reflect(line)
return self.func(c, -self.radius)
from .polygon import Polygon
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import re
from collections import defaultdict, namedtuple
from hashlib import sha1
from colors import red
from pants.backend.core.tasks.console_task import ConsoleTask
from pants.backend.core.tasks.task import Task
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.build_graph import CycleException, sort_targets
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import FingerprintStrategy
from pants.util.memo import memoized_property
class JvmPlatformAnalysisMixin(object):
  """Mixin which provides common helper methods to JvmPlatformValidate and JvmPlatformExplain."""

  @classmethod
  def _is_jvm_target(cls, target):
    """Whether the given target is subject to jvm platform analysis."""
    return isinstance(target, JvmTarget)

  @classmethod
  def jvm_version(cls, target):
    """The java -target level this target is configured to compile against."""
    return target.platform.target_level

  @memoized_property
  def jvm_targets(self):
    """All JvmTargets in the current context, as a frozenset."""
    return frozenset(self.context.targets(self._is_jvm_target))

  def _unfiltered_jvm_dependency_map(self, fully_transitive=False):
    """Jvm dependency map without filtering out non-JvmTarget keys, exposed for testing.

    Unfiltered because the keys in the resulting map include non-JvmTargets.
    See the explanation in the jvm_dependency_map() docs for what this method produces.

    :param fully_transitive: if true, the elements of the map will be the full set of transitive
      JvmTarget dependencies, not just the "direct" ones. (see jvm_dependency_map for the
      definition of "direct")
    :return: map of target -> set of JvmTarget "direct" dependencies.
    """
    deps_by_target = defaultdict(set)

    def collect_jvm_deps(target):
      for dep in target.dependencies:
        if not self._is_jvm_target(dep):
          continue
        deps_by_target[target].add(dep)
        if fully_transitive:
          # If 'dep' isn't in deps_by_target yet, it wasn't in the walked `targets` list at all
          # (this is a post-order traversal), which means it can't have JvmTargets as transitive
          # dependencies -- so the update below being a no-op for it is fine. Otherwise, fold in
          # the transitive dependencies already collected for it.
          deps_by_target[target].update(deps_by_target[dep])

    # Vanilla DFS runs in O(|V|+|E|); the loop body in collect_jvm_deps runs once per edge over
    # the whole search, for a total of O(|V|+2|E|), which is still O(|V|+|E|).
    self.context.build_graph.walk_transitive_dependency_graph(
      addresses=[t.address for t in self.jvm_targets],
      work=collect_jvm_deps,
      postorder=True
    )
    return deps_by_target

  @memoized_property
  def jvm_dependency_map(self):
    """A map of each JvmTarget in the context to the set of JvmTargets it depends on "directly".

    "Directly" is in quotes because it isn't quite the normal meaning
    (filter(self._is_jvm_target, target.dependencies)). Here, a JvmTarget 'A' "directly" depends
    on a JvmTarget 'B' iff there is a path from 'A' to 'B' in the dependency graph with no
    *internal* vertex that is a JvmTarget. Equivalently, weighting edges into JvmTargets as 1 and
    all other edges as 0, it is the set of JvmTargets reachable at weighted distance 1.

    This is a subset of all transitive JvmTarget dependencies. The consuming algorithms would be
    correct on the full transitive set too, but this subset is cheaper and sufficient: if A -> b
    -> C -> D with A, C, D JvmTargets and b not, a version conflict between A and D is not seen
    when processing A (distance 2), but is caught when processing C (distance 1). Plain direct
    dependencies would not suffice, since a java 6 A may legally reach a java 7 C only through a
    non-JvmTarget b.

    :return: the dict mapping JvmTarget -> set of JvmTargets.
    """
    unfiltered = self._unfiltered_jvm_dependency_map()
    return {target: deps for target, deps in unfiltered.items()
            if self._is_jvm_target(target) and deps}
class JvmPlatformValidate(JvmPlatformAnalysisMixin, Task):
  """Validation step that runs well in advance of jvm compile.

  Ensures that no jvm targets depend on other targets which use a newer platform.
  """

  class IllegalJavaTargetLevelDependency(TaskError):
    """A jvm target depends on another jvm target with a newer java target level.

    E.g., a java_library targeted for Java 6 depends on a java_library targeted for java 7.
    """

  class PlatformFingerprintStrategy(FingerprintStrategy):
    """Fingerprint strategy which only cares about a target's platform.

    (Dependees are re-checked via invalidate_dependents=True at the call site,
    not via this fingerprint.)
    """

    def compute_fingerprint(self, target):
      hasher = sha1()
      # Targets without a 'platform' attribute all hash to the same (empty) fingerprint.
      if hasattr(target, 'platform'):
        hasher.update(str(tuple(target.platform)))
      return hasher.hexdigest()

    def __eq__(self, other):
      # All instances of this strategy are interchangeable.
      return type(self) == type(other)

    def __hash__(self):
      return hash(type(self).__name__)

  @classmethod
  def product_types(cls):
    # NB(gmalmquist): These are fake products inserted to make sure validation is run very early.
    # There's no point in doing lots of code-gen and compile work if it's doomed to fail. The
    # 'java' product type indicates this task does codegen for java.
    # TODO(John Sirois): plug this into a pre-products validation phase when one becomes available
    # instead of using fake products.
    return ['java']

  @classmethod
  def register_options(cls, register):
    super(JvmPlatformValidate, cls).register_options(register)
    register('--check', default='fatal', choices=['off', 'warn', 'fatal'], fingerprint=True,
             help='Check to make sure no jvm targets target an earlier jdk than their dependencies')
    register('--children-before-parents', default=False, action='store_true',
             fingerprint=True,
             help='Organize output in the form target -> dependencies, rather than '
                  'target -> dependees.')

  def __init__(self, *args, **kwargs):
    super(JvmPlatformValidate, self).__init__(*args, **kwargs)
    # One of 'off', 'warn', 'fatal' (see register_options); execute() skips entirely on 'off'.
    self.check = self.get_options().check
    self.parents_before_children = not self.get_options().children_before_parents

  def validate_platform_dependencies(self):
    """Check all jvm targets in the context, throwing an error or warning if there are bad targets.

    If there are errors, this method fails slow rather than fails fast -- that is, it continues
    checking the rest of the targets before spitting error messages. This is useful, because it's
    nice to have a comprehensive list of all errors rather than just the first one we happened to
    hit.

    :return: the error message when conflicts were found and check == 'warn'; None otherwise.
    :raises IllegalJavaTargetLevelDependency: when conflicts were found and check == 'fatal'.
    """
    conflicts = []

    def is_conflicting(target, dependency):
      # A dependency conflicts when it requires a newer JVM than its dependee targets.
      return self.jvm_version(dependency) > self.jvm_version(target)

    try:
      sort_targets(self.jvm_targets)
    except CycleException:
      self.context.log.warn('Cannot validate dependencies when cycles exist in the build graph.')
      return
    try:
      with self.invalidated(self.jvm_targets,
                            fingerprint_strategy=self.PlatformFingerprintStrategy(),
                            invalidate_dependents=True) as vts:
        dependency_map = self.jvm_dependency_map
        # Only re-check targets whose platform fingerprint (or a dependency's) changed.
        for vts_target in vts.invalid_vts:
          for target in vts_target.targets:
            if target in dependency_map:
              deps = dependency_map[target]
              invalid_dependencies = [dep for dep in deps if is_conflicting(target, dep)]
              if invalid_dependencies:
                conflicts.append((target, invalid_dependencies))
        if conflicts:
          # NB(gmalmquist): It's important to unconditionally raise an exception, then decide later
          # whether to continue raising it or just print a warning, to make sure the targets aren't
          # marked as valid if there are invalid platform dependencies.
          error_message = self._create_full_error_message(conflicts)
          raise self.IllegalJavaTargetLevelDependency(error_message)
    except self.IllegalJavaTargetLevelDependency as e:
      if self.check == 'fatal':
        raise e
      else:
        # 'off' is filtered out in execute(), so 'warn' is the only other possibility here.
        assert self.check == 'warn'
        self.context.log.warn(error_message)
        return error_message

  def _create_individual_error_message(self, target, invalid_dependencies):
    # One indented paragraph per conflicting target; orientation of the
    # relationship text follows --children-before-parents.
    return '\n  {target} targeting "{platform_name}"\n  {relationship}: {dependencies}'.format(
      target=target.address.spec,
      platform_name=target.platform.name,
      dependencies=''.join('\n    {} targeting "{}"'.format(d.address.spec, d.platform.name)
                           for d in sorted(invalid_dependencies)),
      relationship='is depended on by' if self.parents_before_children else 'depends on',
    )

  def _create_full_error_message(self, invalids):
    if self.parents_before_children:
      # Invert (target -> bad deps) pairs into (dependency -> dependees).
      dependency_to_dependees = defaultdict(set)
      for target, deps in invalids:
        for dep in deps:
          dependency_to_dependees[dep].add(target)
      invalids = dependency_to_dependees.items()
    invalids = sorted(invalids)
    individual_errors = '\n'.join(self._create_individual_error_message(target, deps)
                                  for target, deps in invalids)
    return ('Dependencies cannot have a higher java target level than dependees!\n{errors}\n\n'
            'Consider running ./pants jvm-platform-explain with the same targets for more details.'
            .format(errors=individual_errors))

  def execute(self):
    if self.check != 'off':
      # Return value is just for unit testing.
      return self.validate_platform_dependencies()
class JvmPlatformExplain(JvmPlatformAnalysisMixin, ConsoleTask):
  """Console task which provides helpful analysis about jvm platform dependencies.

  This can be very useful when debugging inter-dependencies in large sets of targets with a variety
  of jvm platforms.

  By default, this calculates the minimum and maximum possible -target level of each JvmTarget
  specified, printing the range for each one on the console. This is determined by a target's
  dependencies and dependees: a target cannot have a higher -target level than its dependees, and
  it cannot have a lower -target level than any of its dependencies.

  Additional flags fine-tune this output, including printing more detailed analysis of which
  dependencies/dependees are limiting a target, or filtering the output to only targets you care
  about.

  Besides this functionality, --upgradeable and --downgradeable can print lists of targets which
  can (again, based on the limits of their dependencies and dependees) afford to be upgraded or
  downgraded to a different version.
  """

  # Bundle of the computed per-target version bounds, together with the dependency
  # and dependee maps they were derived from.
  Ranges = namedtuple('ranges', ['min_allowed_version', 'max_allowed_version',
                                 'target_dependencies', 'target_dependees'])

  @classmethod
  def register_options(cls, register):
    super(JvmPlatformExplain, cls).register_options(register)
    register('--ranges', action='store_true', default=True,
             help='For each target, list the minimum and maximum possible jvm target level, based '
                  'on its dependencies and dependees, respectively.')
    register('--detailed', action='store_true', default=False,
             help='Always list the dependencies and dependees that contributed to the assessment of '
                  'legal jvm target levels (rather than only on failure).')
    register('--only-broken', action='store_true', default=False,
             help='Only print jvm target level ranges for targets with currently invalid ranges.')
    register('--upgradeable', action='store_true', default=False,
             help='Print a list of targets which can be upgraded to a higher version than they '
                  'currently are.')
    register('--downgradeable', action='store_true', default=False,
             help='Print a list of targets which can be downgraded to a lower version than they '
                  'currently are.')
    register('--filter',
             help='Limit jvm platform possibility explanation to targets whose specs match this '
                  'regex pattern.')
    register('--transitive', action='store_true', default=False,
             help='List transitive dependencies in analysis output.')

  def __init__(self, *args, **kwargs):
    super(JvmPlatformExplain, self).__init__(*args, **kwargs)
    # Compiled --filter regex, or None when no filtering was requested.
    self.explain_regex = (re.compile(self.get_options().filter) if self.get_options().filter
                          else None)
    self.detailed = self.get_options().detailed
    self.only_broken = self.get_options().only_broken
    self.transitive = self.get_options().transitive

  def _format_error(self, text):
    # Highlight invalid entries in red when console colors are enabled.
    if self.get_options().colors:
      return red(text)
    return text

  def _is_relevant(self, target):
    # A target is reported when no --filter was given or its spec matches the filter.
    return not self.explain_regex or self.explain_regex.match(target.address.spec)

  @memoized_property
  def dependency_map(self):
    """The "direct" jvm dependency map, or the fully transitive one under --transitive."""
    if not self.transitive:
      return self.jvm_dependency_map
    full_map = self._unfiltered_jvm_dependency_map(fully_transitive=True)
    return {target: deps for target, deps in full_map.items()
            if self._is_jvm_target(target) and deps}

  @memoized_property
  def _ranges(self):
    """Compute per-target min/max allowed jvm versions from dependencies/dependees."""
    target_dependencies = defaultdict(set)
    target_dependencies.update(self.dependency_map)
    # Invert the dependency map to get each target's dependees.
    target_dependees = defaultdict(set)
    for target, deps in target_dependencies.items():
      for dependency in deps:
        target_dependees[dependency].add(target)
    max_allowed_version = {}
    min_allowed_version = {}

    def get_versions(targets):
      return map(self.jvm_version, targets)

    for target in self.jvm_targets:
      if target_dependencies[target]:
        # A target's version must be at least as high as its dependencies'.
        min_allowed_version[target] = max(get_versions(target_dependencies[target]))
      if target_dependees[target]:
        # A target can't have a higher version than any of its dependees.
        max_allowed_version[target] = min(get_versions(target_dependees[target]))
    return self.Ranges(min_allowed_version, max_allowed_version, target_dependencies,
                       target_dependees)

  def possible_version_evaluation(self):
    """Evaluate the possible range of versions for each target, yielding the output analysis."""
    ranges = self._ranges
    yield 'Allowable JVM platform ranges (* = anything):'
    for target in sorted(filter(self._is_relevant, self.jvm_targets)):
      min_version = ranges.min_allowed_version.get(target)
      max_version = ranges.max_allowed_version.get(target)
      current_valid = True
      if min_version and self.jvm_version(target) < min_version:
        current_valid = False
      if max_version and self.jvm_version(target) > max_version:
        current_valid = False
      current_text = str(self.jvm_version(target))
      if not current_valid:
        current_text = self._format_error(current_text)
      elif self.only_broken:
        # Under --only-broken, skip targets whose current version is legal.
        continue
      if min_version and max_version:
        range_text = '{} to {}'.format(min_version, max_version)
        # An empty range means the constraints are unsatisfiable.
        if min_version > max_version:
          range_text = self._format_error(range_text)
      elif min_version:
        range_text = '{}+'.format(min_version)
      elif max_version:
        range_text = '<={}'.format(max_version)
      else:
        range_text = '*'
      yield '{address}: {range} (is {current})'.format(address=target.address.spec,
                                                       range=range_text,
                                                       current=current_text,)
      if self.detailed or not current_valid:
        # Explain which specific targets pin each bound.
        if min_version:
          min_because = [t for t in ranges.target_dependencies[target]
                         if self.jvm_version(t) == min_version]
          yield '  min={} because of dependencies:'.format(min_version)
          for dep in sorted(min_because):
            yield '    {}'.format(dep.address.spec)
        if max_version:
          max_because = [t for t in ranges.target_dependees[target]
                         if self.jvm_version(t) == max_version]
          yield '  max={} because of dependees:'.format(max_version)
          for dep in sorted(max_because):
            yield '    {}'.format(dep.address.spec)
        yield ''

  def _changeable(self, change_name, can_change, change_getter):
    """Yield report lines for targets whose version can be moved in one direction.

    :param change_name: 'upgrade' or 'downgrade', used in the output text.
    :param can_change: binary predicate (current_version, bound) -> bool.
    :param change_getter: maps a target to its relevant bound (or None for unconstrained).
    """
    changes = {}
    for target in filter(self._is_relevant, self.jvm_targets):
      allowed = change_getter(target)
      # None means unconstrained in this direction, so any change is possible.
      if allowed is None or can_change(self.jvm_version(target), allowed):
        changes[target] = allowed
    yield 'The following {count} target{plural} can be {change}d:'.format(
      count=len(changes),
      change=change_name,
      plural='' if len(changes) == 1 else 's',
    )
    for target, allowed in sorted(changes.items()):
      yield '{target} can {change} to {allowed}'.format(target=target.address.spec,
                                                        allowed=allowed or '*',
                                                        change=change_name)
    yield ''

  def downgradeable(self):
    # A target can be downgraded as far as the highest version among its dependencies.
    return self._changeable('downgrade',
                            can_change=lambda curr, nxt: curr > nxt,
                            change_getter=self._ranges.min_allowed_version.get)

  def upgradeable(self):
    # A target can be upgraded as far as the lowest version among its dependees.
    return self._changeable('upgrade',
                            can_change=lambda curr, nxt: curr < nxt,
                            change_getter=self._ranges.max_allowed_version.get)

  def console_output(self, targets):
    if self.get_options().ranges:
      for line in self.possible_version_evaluation():
        yield line
    if self.get_options().upgradeable:
      for line in self.upgradeable():
        yield line
    if self.get_options().downgradeable:
      for line in self.downgradeable():
        yield line
| |
##
# Copyright (c) 2013-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.common.datastore.podding.base import FailedCrossPodRequestError
from txdav.who.delegates import Delegates
class DirectoryPoddingConduitMixin(object):
    """
    Defines the cross-pod API for common directory operations that will be mixed into the
    L{PoddingConduit} class.

    Each operation comes as a send_*/recv_* pair: send_* serializes the request and
    ships it to the peer pod via L{sendRequestToServer}; recv_* runs on the peer and
    services the request against the local store.
    """

    @inlineCallbacks
    def send_all_group_delegates(self, txn, server):
        """
        Request all group delegates on another pod.

        @param txn: transaction to use
        @type txn: L{CommonStoreTransaction}
        @param server: server to query
        @type server: L{Server}
        """
        request = {
            "action": "all-group-delegates",
        }
        response = yield self.sendRequestToServer(txn, server, request)
        returnValue(set(response))

    @inlineCallbacks
    def recv_all_group_delegates(self, txn, request):
        """
        Process an all-group-delegates cross-pod request. Request arguments as per
        L{send_all_group_delegates}.

        @param request: request arguments
        @type request: C{dict}
        """
        delegatedUIDs = yield txn.allGroupDelegates()
        returnValue(list(delegatedUIDs))

    @inlineCallbacks
    def send_set_delegates(self, txn, delegator, delegates, readWrite):
        """
        Set delegates for delegator on another pod.

        @param txn: transaction to use
        @type txn: L{CommonStoreTransaction}
        @param delegator: delegator to set
        @type delegator: L{DirectoryRecord}
        @param delegates: delegates to set
        @type delegates: L{list} of L{DirectoryRecord}
        @param readWrite: if True, read and write access delegates are returned;
            read-only access otherwise
        """
        # The delegator must live on the *other* pod for a cross-pod call to make sense.
        if delegator.thisServer():
            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
        request = {
            "action": "set-delegates",
            "uid": delegator.uid,
            "delegates": [delegate.uid for delegate in delegates],
            "read-write": readWrite,
        }
        yield self.sendRequestToServer(txn, delegator.server(), request)

    @inlineCallbacks
    def recv_set_delegates(self, txn, request):
        """
        Process a set-delegates cross-pod request. Request arguments as per
        L{send_set_delegates}.

        @param request: request arguments
        @type request: C{dict}
        """
        delegator = yield txn.directoryService().recordWithUID(request["uid"])
        if delegator is None or not delegator.thisServer():
            # Format the uid from the request: delegator may be None here, and
            # dereferencing it would raise AttributeError instead of the intended error.
            raise FailedCrossPodRequestError("Cross-pod delegate not on this server: {}".format(request["uid"]))
        delegates = []
        for uid in request["delegates"]:
            delegate = yield txn.directoryService().recordWithUID(uid)
            if delegate is None:
                raise FailedCrossPodRequestError("Cross-pod delegate missing on this server: {}".format(uid))
            delegates.append(delegate)
        yield Delegates.setDelegates(txn, delegator, delegates, request["read-write"])

    @inlineCallbacks
    def send_get_delegates(self, txn, delegator, readWrite, expanded=False):
        """
        Get delegates from another pod.

        @param txn: transaction to use
        @type txn: L{CommonStoreTransaction}
        @param delegator: delegator to lookup
        @type delegator: L{DirectoryRecord}
        @param readWrite: if True, read and write access delegates are returned;
            read-only access otherwise
        @param expanded: if True, expand group delegates into their members
        """
        if delegator.thisServer():
            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
        request = {
            "action": "get-delegates",
            "uid": delegator.uid,
            "read-write": readWrite,
            "expanded": expanded,
        }
        response = yield self.sendRequestToServer(txn, delegator.server(), request)
        returnValue(set(response))

    @inlineCallbacks
    def recv_get_delegates(self, txn, request):
        """
        Process a get-delegates cross-pod request. Request arguments as per
        L{send_get_delegates}.

        @param request: request arguments
        @type request: C{dict}
        """
        delegator = yield txn.directoryService().recordWithUID(request["uid"])
        if delegator is None or not delegator.thisServer():
            # Use the request uid: delegator may be None here.
            raise FailedCrossPodRequestError("Cross-pod delegate not on this server: {}".format(request["uid"]))
        delegates = yield Delegates._delegatesOfUIDs(txn, delegator, request["read-write"], request["expanded"])
        returnValue(list(delegates))

    @inlineCallbacks
    def send_get_delegators(self, txn, server, delegate, readWrite):
        """
        Get delegators from another pod.

        @param txn: transaction to use
        @type txn: L{CommonStoreTransaction}
        @param server: server to query
        @type server: L{Server}
        @param delegate: delegate to lookup
        @type delegate: L{DirectoryRecord}
        @param readWrite: if True, read and write access delegates are returned;
            read-only access otherwise
        """
        # Here the delegate must be a *local* record; we ask the remote pod which of
        # its delegators reference it.
        if not delegate.thisServer():
            raise FailedCrossPodRequestError("Cross-pod destination not on this server: {}".format(delegate.uid))
        request = {
            "action": "get-delegators",
            "uid": delegate.uid,
            "read-write": readWrite,
        }
        response = yield self.sendRequestToServer(txn, server, request)
        returnValue(set(response))

    @inlineCallbacks
    def recv_get_delegators(self, txn, request):
        """
        Process a get-delegators cross-pod request. Request arguments as per
        L{send_get_delegators}.

        @param request: request arguments
        @type request: C{dict}
        """
        delegate = yield txn.directoryService().recordWithUID(request["uid"])
        # The delegate must exist and must belong to the *requesting* pod, not this one.
        if delegate is None or delegate.thisServer():
            raise FailedCrossPodRequestError("Cross-pod delegate missing or on this server: {}".format(request["uid"]))
        delegators = yield Delegates._delegatedToUIDs(txn, delegate, request["read-write"], onlyThisServer=True)
        returnValue(list(delegators))

    @inlineCallbacks
    def send_dump_individual_delegates(self, txn, delegator):
        """
        Get L{DelegateRecords} from another pod.

        @param txn: transaction to use
        @type txn: L{CommonStoreTransaction}
        @param delegator: delegator to lookup
        @type delegator: L{DirectoryRecord}
        """
        if delegator.thisServer():
            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
        request = {
            "action": "dump-individual-delegates",
            "uid": delegator.uid,
        }
        response = yield self.sendRequestToServer(txn, delegator.server(), request)
        returnValue(response)

    @inlineCallbacks
    def recv_dump_individual_delegates(self, txn, request):
        """
        Process a dump-individual-delegates cross-pod request. Request arguments as per
        L{send_dump_individual_delegates}.

        @param request: request arguments
        @type request: C{dict}
        """
        delegator = yield txn.directoryService().recordWithUID(request["uid"])
        if delegator is None or not delegator.thisServer():
            # Use the request uid: delegator may be None here.
            raise FailedCrossPodRequestError("Cross-pod delegate missing or on this server: {}".format(request["uid"]))
        delegates = yield txn.dumpIndividualDelegatesLocal(delegator.uid)
        returnValue(self._to_serialize_list(delegates))

    @inlineCallbacks
    def send_dump_group_delegates(self, txn, delegator):
        """
        Get L{DelegateGroupsRecord},L{GroupsRecord} from another pod.

        @param txn: transaction to use
        @type txn: L{CommonStoreTransaction}
        @param delegator: delegator to lookup
        @type delegator: L{DirectoryRecord}
        """
        if delegator.thisServer():
            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
        request = {
            "action": "dump-group-delegates",
            "uid": delegator.uid,
        }
        response = yield self.sendRequestToServer(txn, delegator.server(), request)
        returnValue(response)

    @inlineCallbacks
    def recv_dump_group_delegates(self, txn, request):
        """
        Process a dump-group-delegates cross-pod request. Request arguments as per
        L{send_dump_group_delegates}.

        @param request: request arguments
        @type request: C{dict}
        """
        delegator = yield txn.directoryService().recordWithUID(request["uid"])
        if delegator is None or not delegator.thisServer():
            # Use the request uid: delegator may be None here.
            raise FailedCrossPodRequestError("Cross-pod delegate missing or on this server: {}".format(request["uid"]))
        results = yield txn.dumpGroupDelegatesLocal(delegator.uid)
        returnValue([[delegator_record.serialize(), group_record.serialize()] for delegator_record, group_record in results])

    @inlineCallbacks
    def send_dump_external_delegates(self, txn, delegator):
        """
        Get L{ExternalDelegateGroupsRecord} from another pod.

        @param txn: transaction to use
        @type txn: L{CommonStoreTransaction}
        @param delegator: delegator to lookup
        @type delegator: L{DirectoryRecord}
        """
        if delegator.thisServer():
            raise FailedCrossPodRequestError("Cross-pod destination on this server: {}".format(delegator.uid))
        request = {
            "action": "dump-external-delegates",
            "uid": delegator.uid,
        }
        response = yield self.sendRequestToServer(txn, delegator.server(), request)
        returnValue(response)

    @inlineCallbacks
    def recv_dump_external_delegates(self, txn, request):
        """
        Process a dump-external-delegates cross-pod request. Request arguments as per
        L{send_dump_external_delegates}.

        @param request: request arguments
        @type request: C{dict}
        """
        delegator = yield txn.directoryService().recordWithUID(request["uid"])
        if delegator is None or not delegator.thisServer():
            # Use the request uid: delegator may be None here.
            raise FailedCrossPodRequestError("Cross-pod delegate missing or on this server: {}".format(request["uid"]))
        delegates = yield txn.dumpExternalDelegatesLocal(delegator.uid)
        returnValue(self._to_serialize_list(delegates))
| |
# Copyright 2011-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#======================================================================
#
# ICMPv6 Header Format
#
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Type | Code | Checksum |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#
#======================================================================
"""
This file parses ICMPv6 as well as NDP
See RFCs 4443 and 4861 in particular.
"""
#TODO: Move NDP into its own file?
#TODO: Clean this up in general
#TODO: Write tests (at least pack/unpack)
import struct
import random
import new
from packet_utils import *
from packet_base import packet_base
from pox.lib.addresses import IPAddr6,EthAddr
from pox.lib.util import hexdump, init_helper
# ICMPv6 message types (see RFC 4443; NDP types are from RFC 4861).
# Errors
TYPE_DEST_UNREACH = 1
TYPE_PACKET_TOO_BIG = 2
TYPE_TIME_EXCEED = 3
TYPE_PARAM_PROB = 4
# Informational
TYPE_ECHO_REQUEST = 128
TYPE_ECHO_REPLY = 129
TYPE_MC_LISTENER_QUERY = 130
TYPE_MC_LISTENER_REPORT = 131
TYPE_MC_LISTENER_DONE = 132
TYPE_ROUTER_SOLICITATION = 133 # NDP
TYPE_ROUTER_ADVERTISEMENT = 134 # NDP
TYPE_NEIGHBOR_SOLICITATION = 135 # NDP
TYPE_NEIGHBOR_ADVERTISEMENT = 136 # NDP
TYPE_REDIRECT = 137 # NDP
TYPE_ROUTER_RENUMBER = 138
TYPE_MC_LISTENER_REPORT_V2 = 143
TYPE_MRD_ADVERTISEMENT = 151
TYPE_MRD_SOLICITATION = 152
TYPE_MRD_TERMINATION = 153

# Per-type "code" values for the error types above.
CODE_UNREACH_NO_ROUTE = 0
CODE_UNREACH_ADMIN_PROHIBIT = 1
CODE_UNREACH_BEYOND_SRC_SCOPE = 2
CODE_UNREACH_ADDR_UNREACHABLE = 3
CODE_UNREACH_PORT_UNREACHABLE = 4
CODE_UNREACH_SRC_POLICY_FAIL = 5
CODE_UNREACH_DST_ROUTE_REJECT = 6
CODE_UNREACH_SRC_ROUTE_ERROR = 7
CODE_TIME_HOP_EXCEEDED = 0
CODE_TIME_FRAG_TIME_EXCEEDED = 1
CODE_PARAM_BAD_HEADER = 0
CODE_PARAM_BAD_NEXT_HEADER = 1
CODE_PARAM_BAD_OPTION = 2

# Reverse lookup of type number -> symbolic name, used for stringizing.
#TODO: Use a class registry for this
_type_to_name = {
  1 : "TYPE_DEST_UNREACH",
  2 : "TYPE_PACKET_TOO_BIG",
  3 : "TYPE_TIME_EXCEED",
  4 : "TYPE_PARAM_PROB",
  128 : "TYPE_ECHO_REQUEST",
  129 : "TYPE_ECHO_REPLY",
  130 : "TYPE_MC_LISTENER_QUERY",
  131 : "TYPE_MC_LISTENER_REPORT",
  132 : "TYPE_MC_LISTENER_DONE",
  133 : "TYPE_ROUTER_SOLICITATION",
  134 : "TYPE_ROUTER_ADVERTISEMENT",
  135 : "TYPE_NEIGHBOR_SOLICITATION",
  136 : "TYPE_NEIGHBOR_ADVERTISEMENT",
  137 : "TYPE_REDIRECT",
  138 : "TYPE_ROUTER_RENUMBER",
  143 : "TYPE_MC_LISTENER_REPORT_V2",
  151 : "TYPE_MRD_ADVERTISEMENT",
  152 : "TYPE_MRD_SOLICITATION",
  153 : "TYPE_MRD_TERMINATION",
}

# Registry of NDP option classes keyed by wire option type;
# populated by the nd_option_def decorator below.
_nd_options = {}
def nd_option_def (cls):
  """
  Class decorator: register an NDP option class in _nd_options.

  The class is filed under its TYPE code so NDOptionBase.unpack_new()
  can dispatch to it.  Returns the class unchanged.
  """
  t = cls.TYPE
  _nd_options[t] = cls
  return cls
def _parse_ndp_options (raw, prev, offset = 0, buf_len = None):
  """
  Parse NDP options and return (new_offset, [option_list])

  raw     - buffer containing the option data
  prev    - enclosing ICMPv6 message, passed through to each option
  offset  - index where the options begin
  buf_len - absolute end index of the option data (defaults to len(raw))

  Raises RuntimeError if the remaining option data is not a whole
  number of 8-byte units (every NDP option is, per RFC 4861).
  """
  # Dead locals (_offset, remaining) from the original removed.
  if buf_len is None: buf_len = len(raw)
  r = []
  while offset < buf_len - 2:
    if (buf_len - offset) % 8 != 0:
      raise RuntimeError("Bad option data length")
    offset,o = NDOptionBase.unpack_new(raw, offset, buf_len, prev=prev)
    r.append(o)
  return offset,r
class NDOptionBase (packet_base):
  """
  Neighbor Discovery option base class

  Subclasses define TYPE (the option type code) and LENGTH (the fixed
  payload length in bytes, or None for variable-length options) and are
  registered in _nd_options via the @nd_option_def decorator so that
  unpack_new() can dispatch to them.
  """
  #LENGTH = <fixed padded payload length in bytes or None>
  #TYPE = <type>
  def __init__ (self, *args, **kw):
    # prev is the enclosing ICMPv6 message (supplies .type/.code below)
    self.prev = kw.pop('prev', None)
    self._init(*args, **kw)
    init_helper(self, kw)
  def __repr__ (self):
    # Strip the common class-name prefix for compact display
    s = type(self).__name__
    if s.startswith("NDOption"):
      s = s[8:]
    elif s.startswith("NDOpt"):
      s = s[5:]
    ss = self._fields()
    if ss:
      s += ' '
      s += " ".join(["%s:%s" % (k,v) for k,v in ss.iteritems()])
    return "[" + s + "]"
  @property
  def type (self):
    # ICMPv6 type of the enclosing message
    return self.prev.type
  @property
  def code (self):
    # ICMPv6 code of the enclosing message
    return self.prev.code
  def _fields (self):
    """
    Override to add fields to stringizing

    Return a dict of name->value pairs for __repr__, or None for none.
    """
    return None
  def _init (self, *args, **kw):
    """
    Called during initialization
    Override me
    """
    pass
  def __len__ (self):
    """
    Payload length in bytes
    Override if your option type has flexible length
    """
    assert self.LENGTH is not None
    return self.LENGTH
  @staticmethod
  def unpack_new (raw, offset = 0, buf_len = None, prev = None):
    """
    Unpacks a new instance of the appropriate subclass from a buffer
    returns (new_offset, object)

    Reads the two-byte type/length header (the length field counts
    8-byte units and includes the header itself), looks the type up in
    _nd_options (falling back to NDOptionGeneric), and delegates the
    body to the subclass's _unpack_new().  Raises TruncatedException
    for short buffers and RuntimeError for malformed options.
    """
    if buf_len is None: buf_len = len(raw)
    if buf_len < 2:
      raise TruncatedException()
    t,l = struct.unpack_from("BB", raw, offset)
    if l == 0:
      raise RuntimeError("Zero-length NDP option")
    offset += 2
    # Body length: l 8-byte units minus the two header bytes
    length_bytes = l * 8 - 2
    if (buf_len - offset) < length_bytes:
      raise TruncatedException()
    c = _nd_options.get(t) #FIXME: Ugh, *class registry*
    if c is None:
      c = NDOptionGeneric
    if c.LENGTH is not None and c.LENGTH != length_bytes:
      raise RuntimeError("Bad length for NDP option")
    new_off,o = c._unpack_new(raw, offset, t, length_bytes, prev=prev)
    assert new_off == offset+length_bytes
    return new_off,o
  def pack (self):
    # Prepend the type/length header and pad the body out to a whole
    # number of 8-byte units (the length field counts those units).
    d = self._pack_body()
    while (len(d)+2) % 8: d += "\x00" # sloppy
    return struct.pack("BB", self.TYPE, (len(d)+2)/8) + d
  @classmethod
  def _unpack_new (cls, raw, offset, t, length, prev):
    """
    Unpacks the body portion of this option type into a new object
    Override me.
    """
    raise RuntimeError("Not implemented")
    #o = new.instance(cls)
    #o._init()
    #return offset+length,o
  def _pack_body (self):
    """
    Returns the body of this option packed into bytes
    Override me
    """
    raise RuntimeError("Not implemented")
    #return b''
class NDOptionGeneric (NDOptionBase):
  """
  Catch-all NDP option used for type codes with no registered class

  The undecoded option payload is kept in .raw.
  """
  LENGTH = None
  TYPE = None
  def __repr__ (self):
    return "<NDP Option Type %s>" % (self.TYPE,)
  def _init (self, *args, **kw):
    self.raw = b''
  def __len__ (self):
    return len(self.raw)
  def _pack_body (self):
    return self.raw
  @classmethod
  def _unpack_new (cls, raw, offset, t, length, prev):
    """
    Build a generic option instance holding the raw payload bytes
    """
    end = offset + length
    inst = cls()
    inst._init()
    inst.TYPE = t
    inst.prev = prev
    inst.raw = raw[offset:end]
    return end,inst
class NDOptLinkLayerAddress (NDOptionBase):
  """
  Common base for the source/target link-layer address options

  Assumes the link layer is Ethernet (6-byte addresses).
  """
  LENGTH = 6
  def _init (self, *args, **kw):
    addr = kw.pop('address', None)
    self.address = None if addr is None else EthAddr(addr)
  def _fields (self):
    return {'addr':self.address}
  @classmethod
  def _unpack_new (cls, raw, offset, t, length, prev):
    end = offset + length
    o = cls(address = EthAddr(raw[offset:end]), prev=prev)
    return end,o
  def _pack_body (self):
    return self.address.raw
@nd_option_def
class NDOptSourceLinkLayerAddress (NDOptLinkLayerAddress):
  # Source Link-Layer Address option (RFC 4861 section 4.6.1)
  TYPE = 1
@nd_option_def
class NDOptTargetLinkLayerAddress (NDOptLinkLayerAddress):
  # Target Link-Layer Address option (RFC 4861 section 4.6.1)
  TYPE = 2
@nd_option_def
class NDOptPrefixInformation (NDOptionBase):
  """
  Prefix Information option (RFC 4861 section 4.6.2)

  Carried in Router Advertisements; announces an on-link prefix and/or
  a prefix usable for stateless address autoconfiguration.
  """
  LENGTH = 1 + 1 + 4 + 4 + 4 + 4 * 4
  TYPE = 3
  ON_LINK_FLAG = 0x80    # L: prefix can be used for on-link determination
  AUTONOMOUS_FLAG = 0x40 # A: prefix can be used for SLAAC
  def _init (self, *args, **kw):
    self.prefix_length = 0
    self.on_link = False
    self.is_autonomous = False
    self.valid_lifetime = 0      # seconds
    self.preferred_lifetime = 0  # seconds
    self.prefix = IPAddr6.UNDEFINED
  def _fields (self):
    r = {}
    if self.on_link: r['on_link'] = True
    if self.is_autonomous: r['autonomous'] = True
    r['valid'] = self.valid_lifetime
    r['preferred'] = self.preferred_lifetime
    r['prefix'] = "%s/%s" % (self.prefix, self.prefix_length)
    return r
  @classmethod
  def _unpack_new (cls, raw, offset, t, length, prev):
    o = cls()
    o.prefix_length,flags,o.valid_lifetime,o.preferred_lifetime = \
        struct.unpack_from('!BBII', raw, offset)
    offset += 1 + 1 + 4 + 4
    offset += 4 # Reserved
    o.prefix = IPAddr6(raw=raw[offset:offset+16])
    offset += 16
    o.on_link = (flags & cls.ON_LINK_FLAG) != 0
    o.is_autonomous = (flags & cls.AUTONOMOUS_FLAG) != 0
    o.prev = prev
    return offset,o
  @property
  def flags (self):
    f = 0
    if self.on_link: f |= self.ON_LINK_FLAG
    if self.is_autonomous: f |= self.AUTONOMOUS_FLAG
    return f
  def _pack_body (self):
    # BUGFIX: this was named pack(), which overrode NDOptionBase.pack()
    # and so serialized the option WITHOUT its type/length header (and
    # without padding).  As _pack_body(), the base class adds both.
    s = struct.pack("!BBII", self.prefix_length, self.flags,
        self.valid_lifetime,self.preferred_lifetime)
    s += '\x00' * 4 # Reserved
    s += self.prefix.raw
    return s
@nd_option_def
class NDOptMTU (NDOptionBase):
  """
  MTU option (RFC 4861 section 4.6.4)

  Body is two reserved bytes followed by the 32-bit link MTU.
  """
  LENGTH = 6
  TYPE = 5
  def _init (self, *args, **kw):
    self.mtu = 0
  def _fields (self):
    return {'mtu':self.mtu}
  @classmethod
  def _unpack_new (cls, raw, offset, t, length, prev):
    o = cls()
    o.prev = prev
    _,o.mtu = struct.unpack_from('!HI', raw, offset) # skip 2 reserved bytes
    offset += 2 + 4
    return offset,o
  def _pack_body (self):
    # BUGFIX: this was named pack(), which overrode NDOptionBase.pack()
    # and so serialized the option without its type/length header.
    return struct.pack("!HI", 0, self.mtu)
#NOTE: icmp_base sort of ignores the usual packet_base API. Hopefully
# the way it does so doesn't break too much. The API it supports
# is closer to the way a newer version of the API would work.
class icmp_base (packet_base):
  """
  ICMPv6 message-body base class

  Subclasses represent the per-type body that follows the common
  four-byte ICMPv6 header (type/code/checksum), which is handled by
  the icmpv6 class itself.
  """
  def __str__ (self):
    s = "[ICMPv6/" + self.__class__.__name__
    ss = self._fields()
    if ss:
      s += ' '
      s += " ".join(["%s:%s" % (k,v) for k,v in ss.iteritems()])
    return s + "]"
  def _fields (self):
    """
    Return map of fields used for string formatting.
    Override me to customize stringizing.
    """
    return {}
  def _init_ (self):
    """
    Called during initialization
    Override me
    In most other hierarchies that follow a similar pattern, this method
    would be named "_init", but that name is already used in the
    packet_base hierarchy.
    """
    pass
  @property
  def type (self):
    # ICMPv6 type from the enclosing icmpv6 header
    return self.prev.type
  @property
  def code (self):
    # ICMPv6 code from the enclosing icmpv6 header
    return self.prev.code
  def __init__ (self, prev=None, **kw):
    packet_base.__init__(self)
    self.prev = prev
    self.next = None
    self._init_()
    self._init(kw)
    self.parsed = True
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    """
    Unpacks a new instance of this class from a buffer
    returns (new_offset, object)

    buf_len is the absolute end index of the body data.
    """
    raise RuntimeError("Unimplemented on class %s" % (cls.__name__,))
    #.parsed = True
  def pack (self):
    # Serialize just this body; the ICMPv6 header is added by icmpv6.hdr()
    raise RuntimeError("Unimplemented on class %s" % (type(self).__name__,))
class ICMPGeneric (icmp_base):
  """
  Catch-all ICMPv6 body; keeps the undecoded bytes in .raw
  """
  def _fields (self):
    return {'bytes':len(self.raw)}
  def _init_ (self):
    self.raw = b''
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    # BUGFIX: buf_len is the absolute end index (as in every sibling
    # unpack_new); the old code sliced raw[offset:offset+buf_len] and
    # returned offset+buf_len, over-reading whenever offset > 0, and
    # crashed outright when buf_len was left at its None default.
    if buf_len is None: buf_len = len(raw)
    o = cls()
    o.raw = raw[offset:buf_len]
    o.prev = prev
    o.parsed = True
    return buf_len,o
  def pack (self):
    return self.raw
class NDRouterSolicitation (icmp_base):
  """
  Router Solicitation message (RFC 4861 section 4.1)

  Body: four reserved bytes followed by NDP options.
  """
  def _init_ (self):
    self.options = []
  def _fields (self):
    return {"num_opts":len(self.options)}
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    if buf_len is None: buf_len = len(raw)
    o = cls()
    try:
      offset += 4 # Reserved word
      offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len)
      o.parsed = True
    except TruncatedException:
      pass
    o.prev = prev
    return offset,o
  def pack (self):
    parts = ['\x00' * 4] # Reserved word
    for opt in self.options:
      parts.append(opt.pack())
    return ''.join(parts)
class NDRouterAdvertisement (icmp_base):
  """
  Router Advertisement message (RFC 4861 section 4.2)

  Body: cur hop limit (1), M/O flags (1), router lifetime (2, seconds),
  reachable time (4, ms), retrans timer (4, ms), then NDP options.
  Note the RA body has NO leading Reserved word (unlike RS/NS).
  """
  MANAGED_FLAG = 0x80 # M: addresses available via DHCPv6
  OTHER_FLAG = 0x40   # O: other configuration available via DHCPv6
  def __init__ (self, raw=None, prev=None, **kw):
    icmp_base.__init__(self)
    self.prev = prev
    self.hop_limit = 0
    self.is_managed = False
    self.is_other = False
    self.lifetime = 0 # seconds
    self.reachable = 0 # milliseconds
    self.retrans_timer = 0 # milliseconds
    self.options = []
    if raw is not None: self.parse(raw)
    self._init(kw)
  def _fields (self):
    f = ['hop_limit','lifetime','reachable',
         'retrans_timer']
    r = {}
    if len(self.options): r["opts"] = self.options
    if self.is_managed: r['managed'] = True
    if self.is_other: r['other'] = True
    for ff in f:
      r[ff] = getattr(self, ff)
    return r
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    o = cls()
    _offset = offset
    if buf_len is None: buf_len = len(raw)
    try:
      # BUGFIX: the last field was assigned to o.retrans_time, leaving
      # retrans_timer (the attribute __init__ creates and _fields
      # displays) stuck at 0 after a parse.
      o.hop_limit,flags,o.lifetime,o.reachable,o.retrans_timer = \
          struct.unpack_from("!BBHII", raw, offset)
      offset += 1 + 1 + 2 + 4 + 4
      offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len)
      o.is_managed = flags & cls.MANAGED_FLAG
      o.is_other = flags & cls.OTHER_FLAG
      o.parsed = True
    except TruncatedException:
      pass
    o.raw = raw[_offset:offset]
    o.prev = prev
    return offset,o
  @property
  def flags (self):
    f = 0
    if self.is_managed: f |= self.MANAGED_FLAG
    if self.is_other: f |= self.OTHER_FLAG
    return f
  def pack (self):
    # BUGFIX: previously emitted four bogus pad bytes first (copy-paste
    # from RS/NS, whose bodies DO start with a Reserved word).  The RA
    # body starts directly at the hop limit -- exactly where unpack_new
    # reads it -- per RFC 4861 section 4.2.
    o = struct.pack("!BBHII", self.hop_limit, self.flags, self.lifetime,
        self.reachable, self.retrans_timer)
    for opt in self.options:
      o += opt.pack()
    return o
class NDNeighborSolicitation (icmp_base):
  """
  Neighbor Solicitation message (RFC 4861 section 4.3)

  Body: four reserved bytes, the 16-byte target address, then options.
  """
  def __init__ (self, raw=None, prev=None, **kw):
    icmp_base.__init__(self)
    self.prev = prev
    self.target = IPAddr6.UNDEFINED
    self.options = []
    if raw is not None: self.parse(raw)
    self._init(kw)
  def _fields (self):
    r = {'num_opts':len(self.options)}
    r["opts"] = self.options
    r['target'] = self.target
    return r
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    if buf_len is None: buf_len = len(raw)
    start = offset
    o = cls()
    try:
      offset += 4 # Reserved word
      o.target = IPAddr6(raw=raw[offset:offset+16])
      offset += 16
      offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len)
      o.parsed = True
    except TruncatedException:
      pass
    o.raw = raw[start:offset]
    o.prev = prev
    return offset,o
  def pack (self):
    parts = ['\x00' * 4] # Reserved word
    parts.append(self.target.raw)
    for opt in self.options:
      parts.append(opt.pack())
    return ''.join(parts)
class NDNeighborAdvertisement (icmp_base):
  """
  Neighbor Advertisement message (RFC 4861 section 4.4)

  Body: flags byte plus three reserved bytes, the 16-byte target
  address, then NDP options.
  """
  ROUTER_FLAG = 0x80
  SOLICITED_FLAG = 0x40
  OVERRIDE_FLAG = 0x20
  def __init__ (self, raw=None, prev=None, **kw):
    icmp_base.__init__(self)
    self.prev = prev
    self.target = IPAddr6.UNDEFINED
    self.options = []
    self.is_router = False
    self.is_solicited = False
    self.is_override = False
    if raw is not None: self.parse(raw)
    self._init(kw)
  def _fields (self):
    r = {}
    if self.options: r["opts"] = self.options
    if self.is_router: r['router'] = True
    if self.is_solicited: r['solicited'] = True
    if self.is_override: r['override'] = True
    r['target'] = self.target
    return r
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    if buf_len is None: buf_len = len(raw)
    start = offset
    o = cls()
    try:
      flags = ord(raw[offset])
      o.is_router = bool(flags & cls.ROUTER_FLAG)
      o.is_solicited = bool(flags & cls.SOLICITED_FLAG)
      o.is_override = bool(flags & cls.OVERRIDE_FLAG)
      offset += 4 # Flags byte plus three reserved bytes
      o.target = IPAddr6(raw=raw[offset:offset+16])
      offset += 16
      offset,o.options = _parse_ndp_options(raw, prev, offset, buf_len)
      o.parsed = True
    except TruncatedException:
      pass
    o.raw = raw[start:offset]
    o.prev = prev
    return offset,o
  def pack (self):
    flags = 0
    if self.is_router: flags |= self.ROUTER_FLAG
    if self.is_solicited: flags |= self.SOLICITED_FLAG
    if self.is_override: flags |= self.OVERRIDE_FLAG
    parts = [chr(flags), '\x00' * 3]
    parts.append(self.target.raw)
    for opt in self.options:
      parts.append(opt.pack())
    return ''.join(parts)
class TimeExceeded (icmp_base):
  """
  Time Exceeded Message (RFC 4443 section 3.3)

  Body: four unused bytes followed by as much of the invoking packet
  as fits; the invoking packet bytes are kept (unparsed) in .next.
  """
  def __init__ (self, raw=None, prev=None, **kw):
    icmp_base.__init__(self)
    self.prev = prev
    self.next = None
    if raw is not None: self.parse(raw)
    self._init(kw)
  def _fields (self):
    # BUGFIX: previously listed 'mtu', an attribute this class never
    # sets (copy-paste from PacketTooBig), so __str__ always raised
    # AttributeError.  Report the embedded-packet size instead.
    r = {}
    if self.next is not None:
      r['bytes'] = len(self.next)
    return r
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    o = cls()
    _offset = offset
    if buf_len is None: buf_len = len(raw)
    try:
      offset += 4 # Unused
      o.next = raw[offset:buf_len]
      offset = buf_len
      o.parsed = True
    except TruncatedException:
      pass
    o.raw = raw[_offset:offset]
    o.prev = prev
    return offset,o
  def hdr (self, payload):
    return struct.pack('!I', 0) # Unused
class PacketTooBig (icmp_base):
  """
  Packet Too Big Message (RFC 4443 section 3.2)

  Body: the 32-bit MTU of the constricting link, then as much of the
  invoking packet as fits (kept unparsed in .next).
  """
  def __init__ (self, raw=None, prev=None, **kw):
    icmp_base.__init__(self)
    self.prev = prev
    self.next = None
    self.mtu = 0
    if raw is not None: self.parse(raw)
    self._init(kw)
  def _fields (self):
    return {'mtu': self.mtu}
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    o = cls()
    _offset = offset
    if buf_len is None: buf_len = len(raw)
    try:
      # BUGFIX: struct.unpack_from returns a tuple; the old code stored
      # the whole 1-tuple in o.mtu instead of the integer, so _fields
      # and hdr() saw/packed the wrong value.
      (o.mtu,) = struct.unpack_from("!I", raw, offset)
      offset += 4
      o.next = raw[offset:buf_len]
      offset = buf_len
      o.parsed = True
    except TruncatedException:
      pass
    o.raw = raw[_offset:offset]
    o.prev = prev
    return offset,o
  def hdr (self, payload):
    return struct.pack('!I', self.mtu)
class unpack_new_adapter (object):
  """
  Mixin providing unpack_new() for classes that still use the older
  construct-with-raw / parse() style of initialization.
  """
  @classmethod
  def unpack_new (cls, raw, offset = 0, buf_len = None, prev = None):
    data = raw[offset:]
    if buf_len is not None:
      data = data[:buf_len]
    o = cls(raw=data, prev=prev)
    return offset+len(o.raw),o
#----------------------------------------------------------------------
#
# Echo Request/Reply
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Identifier | Sequence Number |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#----------------------------------------------------------------------
class echo (packet_base, unpack_new_adapter):
  """
  ICMPv6 Echo Request/Reply body (RFC 4443 section 4)

  Two 16-bit fields (identifier, sequence number) followed by payload.
  """
  MIN_LEN = 4
  def __init__ (self, raw=None, prev=None, **kw):
    packet_base.__init__(self)
    self.prev = prev
    self.id = random.randint(0, 65535) # random ident by default
    self.seq = 0
    if raw is not None:
      self.parse(raw)
    self._init(kw)
  def __str__ (self):
    return "[ICMP6 echo id:%i seq:%i]" % (self.id, self.seq)
  def parse (self, raw):
    assert isinstance(raw, bytes)
    self.raw = raw
    if len(raw) < self.MIN_LEN:
      self.msg('(echo parse) warning echo payload too short to '
               'parse header: data len %u' % (len(raw),))
      return
    self.id,self.seq = struct.unpack('!HH', raw[:self.MIN_LEN])
    self.parsed = True
    self.next = raw[self.MIN_LEN:]
  def hdr (self, payload):
    return struct.pack('!HH', self.id, self.seq)
#----------------------------------------------------------------------
#
# Destination Unreachable
# 0 1 2 3
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Unused |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | IP Header + 8 bytes of original datagram's data |
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
#
#----------------------------------------------------------------------
class unreach (packet_base, unpack_new_adapter):
  """
  ICMPv6 Destination Unreachable body (RFC 4443 section 3.1)

  Four unused bytes followed by as much of the invoking packet as fits;
  the embedded packet is re-parsed as IPv6 when enough of it is present.
  """
  MIN_LEN = 4
  def __init__ (self, raw=None, prev=None, **kw):
    # BUGFIX: packet_base.__init__ was never called here, although every
    # sibling class calls it, leaving base-class state uninitialized.
    packet_base.__init__(self)
    self.prev = prev
    self.unused = 0
    if raw is not None:
      self.parse(raw)
    self._init(kw)
  def __str__ (self):
    # BUGFIX: previously formatted self.next_mtu, an attribute this
    # class never defines (copy-paste from the ICMPv4 unreach), so
    # stringizing always raised AttributeError.
    # NOTE(review): _str_rest presumably comes in via the packet_utils
    # star import -- confirm.
    return _str_rest('[ICMP6 unreach]', self)
  def parse (self, raw):
    assert isinstance(raw, bytes)
    self.raw = raw
    dlen = len(raw)
    if dlen < self.MIN_LEN:
      self.msg('(unreach parse) warning unreachable payload too '
               + 'short to parse header: data len %u' % (dlen,))
      return
    (self.unused,) = struct.unpack('!I', raw[:self.MIN_LEN])
    self.parsed = True
    import ipv6
    # xxx We're assuming this is IPv6!
    if dlen >= 8 + ipv6.MIN_LEN:
      self.next = ipv6.ipv6(raw=raw[unreach.MIN_LEN:],prev=self)
    else:
      self.next = raw[unreach.MIN_LEN:]
  def hdr (self, payload):
    return struct.pack('!I', self.unused)
class icmpv6 (packet_base):
  """
  ICMPv6 header (RFC 4443)

  Parses the common four-byte header (type, code, checksum), verifies
  the checksum against the IPv6 pseudo-header, and dispatches the rest
  of the buffer to a per-type body class.
  """
  MIN_LEN = 4
  def __init__ (self, raw=None, prev=None, **kw):
    super(icmpv6, self).__init__()
    self.prev = prev
    self.type = 0
    self.code = 0
    self.csum = 0
    if raw is not None:
      self.parse(raw)
    self._init(kw)
  def _calc_checksum (self):
    # IPv6 pseudo-header: src + dst + upper-layer length + zeros + 58
    # (the ICMPv6 next-header value); skip_word=21 skips the checksum
    # field itself while summing.
    ph = self.prev.srcip.raw + self.prev.dstip.raw
    ph += struct.pack('!IHBB', len(self.raw), 0, 0, 58) # 58 == ICMPv6
    return checksum(ph + self.raw, skip_word=21)
  @property
  def checksum_ok (self):
    # Can only verify when we have the IPv6 header and the raw bytes
    if not self.prev: return True
    if getattr(self, 'raw', None) is None: return True
    return self.csum == self._calc_checksum()
  def _to_str (self):
    t = _type_to_name.get(self.type, str(self.type))
    cs = ''
    if not self.checksum_ok:
      cs = " BAD_CHECKSUM(%02x!=%02x)" % (self.csum, self._calc_checksum())
    s = '[ICMP+%s/%i%s]' % (t, self.code, cs)
    return s
  def parse (self, raw, buf_len=None):
    assert isinstance(raw, bytes)
    if buf_len is None:
      buf_len = len(raw)
    # BUGFIX: the two branches were inverted -- when a caller supplied
    # an explicit buf_len, self.raw kept the WHOLE buffer, so the
    # length in the checksum pseudo-header (len(self.raw)) was wrong.
    self.raw = raw[:buf_len]
    dlen = len(self.raw)
    if dlen < self.MIN_LEN:
      self.msg('(icmp parse) warning ICMP packet data too short to '
               + 'parse header: data len %u' % (dlen,))
      return
    (self.type, self.code, self.csum) \
        = struct.unpack('!BBH', raw[:self.MIN_LEN])
    if not self.checksum_ok:
      # Keep payload raw on checksum failure; don't mark parsed
      self.msg("Bad ICMPv6 checksum")
      self.next = raw[self.MIN_LEN:]
      return
    else:
      self.parsed = True
    #TODO: Use a class registry
    cls = {
      TYPE_ECHO_REQUEST:echo,
      TYPE_ECHO_REPLY:echo,
      TYPE_PACKET_TOO_BIG:PacketTooBig,
      TYPE_TIME_EXCEED:TimeExceeded,
      TYPE_DEST_UNREACH:unreach,
      TYPE_ROUTER_SOLICITATION:NDRouterSolicitation,
      TYPE_NEIGHBOR_SOLICITATION:NDNeighborSolicitation,
      TYPE_ROUTER_ADVERTISEMENT:NDRouterAdvertisement,
      TYPE_NEIGHBOR_ADVERTISEMENT:NDNeighborAdvertisement,
    }.get(self.type)
    if cls is None:
      # No body class for this type; keep the payload as raw bytes
      self.next = raw[self.MIN_LEN:]
      return
    offset,self.next = cls.unpack_new(raw, offset=self.MIN_LEN,
        buf_len=buf_len,prev=self)
  def hdr (self, payload):
    # Compute the checksum over pseudo-header + header + payload, then
    # emit the four-byte ICMPv6 header.
    payload_len = len(payload) + 4
    ph = self.prev.srcip.raw + self.prev.dstip.raw
    ph += struct.pack('!IHBBBBH', payload_len, 0, 0, 58, # 58 == ICMPv6
        self.type, self.code, 0)
    self.csum = checksum(ph + payload, 0, 21)
    return struct.pack('!BBH', self.type, self.code, self.csum)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import cinder
class CreateVolumeType(forms.SelfHandlingForm):
    """Form for creating a new Cinder volume type."""
    name = forms.CharField(max_length=255, label=_("Name"))
    vol_type_description = forms.CharField(
        max_length=255,
        widget=forms.Textarea(attrs={'rows': 4}),
        label=_("Description"),
        required=False)

    def clean_name(self):
        """Reject names consisting solely of whitespace."""
        name = self.cleaned_data['name']
        if not name.strip():
            raise ValidationError(_('Volume type name can not be empty.'))
        return name

    def handle(self, request, data):
        try:
            volume_type = cinder.volume_type_create(
                request,
                data['name'],
                data['vol_type_description'])
            messages.success(request, _('Successfully created volume type: %s')
                             % data['name'])
            return volume_type
        except Exception as e:
            # A 409 means the name is already taken; attach a field error
            if getattr(e, 'code', None) == 409:
                msg = _('Volume type name "%s" already '
                        'exists.') % data['name']
                self._errors['name'] = self.error_class([msg])
            else:
                redirect = reverse("horizon:admin:volumes:index")
                exceptions.handle(request,
                                  _('Unable to create volume type.'),
                                  redirect=redirect)
class CreateQosSpec(forms.SelfHandlingForm):
    """Form for creating a new QoS Spec with a chosen consumer."""
    name = forms.CharField(max_length=255, label=_("Name"))
    consumer = forms.ChoiceField(label=_("Consumer"),
                                 choices=cinder.CONSUMER_CHOICES)

    def handle(self, request, data):
        try:
            spec = cinder.qos_spec_create(request,
                                          data['name'],
                                          {'consumer': data['consumer']})
            messages.success(request,
                             _('Successfully created QoS Spec: %s')
                             % data['name'])
            return spec
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Unable to create QoS Spec.'),
                              redirect=redirect)
class CreateVolumeTypeEncryption(forms.SelfHandlingForm):
    """Form for adding encryption settings to an existing volume type."""
    name = forms.CharField(label=_("Name"), required=False,
                           widget=forms.TextInput(attrs={'readonly':
                                                         'readonly'}))
    provider = forms.CharField(max_length=255, label=_("Provider"))
    control_location = forms.ChoiceField(label=_("Control Location"),
                                         choices=(('front-end',
                                                   _('front-end')),
                                                  ('back-end',
                                                   _('back-end')))
                                         )
    cipher = forms.CharField(label=_("Cipher"), required=False)
    key_size = forms.IntegerField(label=_("Key Size (bits)"),
                                  required=False,
                                  min_value=1)
    volume_type_id = forms.CharField(widget=forms.HiddenInput())

    def handle(self, request, data):
        try:
            # Set cipher to None if empty.
            # BUGFIX: was `data['cipher'] is u''` -- an identity check
            # that only matches by string-interning accident; compare
            # by value instead.
            if data['cipher'] == '':
                data['cipher'] = None
            # Create encryption for the volume type
            volume_type = cinder.volume_encryption_type_create(
                request,
                data['volume_type_id'],
                data)
            messages.success(request, _('Successfully created encryption for '
                                        'volume type: %s') % data['name'])
            return volume_type
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Unable to create encrypted volume type.'),
                              redirect=redirect)
class ManageQosSpecAssociation(forms.SelfHandlingForm):
    """Form for (re-)associating a volume type with a QoS Spec."""
    qos_spec_choice = forms.ChoiceField(
        label=_("QoS Spec to be associated"),
        help_text=_("Choose associated QoS Spec."))

    def __init__(self, request, *args, **kwargs):
        super(ManageQosSpecAssociation, self).__init__(request,
                                                       *args,
                                                       **kwargs)
        self.fields['qos_spec_choice'].choices = \
            self.populate_qos_spec_choices()

    def populate_qos_spec_choices(self):
        """Build the choice list: every spec except the current one."""
        current = self.initial["cur_qos_spec_id"]
        choices = [(spec.id, spec.name)
                   for spec in self.initial["qos_specs"]
                   if spec.id != current]
        if current:
            # Sentinel entry that removes the current association
            choices.insert(0, ("-1", _("None (removes spec)")))
        if choices:
            choices.insert(0, ("", _("Select a new QoS spec")))
        else:
            choices.insert(0, ("", _("No new QoS spec available")))
        return choices

    def handle(self, request, data):
        vol_type_id = self.initial['type_id']
        new_qos_spec_id = data['qos_spec_choice']
        try:
            # A volume type may be associated with at most ONE QoS Spec,
            # so any existing association must be removed first.
            cur_qos_spec_id = self.initial['cur_qos_spec_id']
            if cur_qos_spec_id:
                old_spec = cinder.qos_spec_get(request, cur_qos_spec_id)
                cinder.qos_spec_disassociate(request,
                                             old_spec,
                                             vol_type_id)
            # Associate the new spec unless removal ("-1") was chosen
            if new_qos_spec_id != '-1':
                new_spec = cinder.qos_spec_get(request, new_qos_spec_id)
                cinder.qos_spec_associate(request,
                                          new_spec,
                                          vol_type_id)
            messages.success(request,
                             _('Successfully updated QoS Spec association.'))
            return True
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Error updating QoS Spec association.'),
                              redirect=redirect)
class EditQosSpecConsumer(forms.SelfHandlingForm):
    """Form for changing the consumer of an existing QoS Spec."""
    consumer_choice = forms.ChoiceField(
        label=_("QoS Spec Consumer"),
        choices=cinder.CONSUMER_CHOICES,
        help_text=_("Choose consumer for this QoS Spec."))

    def __init__(self, request, *args, **kwargs):
        super(EditQosSpecConsumer, self).__init__(request, *args, **kwargs)
        spec = self.initial["qos_spec"]
        self.fields['consumer_choice'].initial = spec.consumer

    def clean_consumer_choice(self):
        """Reject a choice identical to the spec's current consumer."""
        new_consumer = self.cleaned_data.get('consumer_choice')
        if new_consumer == self.initial['qos_spec'].consumer:
            raise forms.ValidationError(
                _('QoS Spec consumer value must be different than '
                  'the current consumer value.'))
        return new_consumer

    def handle(self, request, data):
        qos_spec_id = self.initial['qos_spec_id']
        try:
            cinder.qos_spec_set_keys(request,
                                     qos_spec_id,
                                     {'consumer': data['consumer_choice']})
            messages.success(request,
                             _('Successfully modified QoS Spec consumer.'))
            return True
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request, _('Error editing QoS Spec consumer.'),
                              redirect=redirect)
class EditVolumeType(forms.SelfHandlingForm):
    """Form for renaming a volume type and editing its description."""
    name = forms.CharField(max_length=255,
                           label=_("Name"))
    description = forms.CharField(max_length=255,
                                  widget=forms.Textarea(attrs={'rows': 4}),
                                  label=_("Description"),
                                  required=False)

    def clean_name(self):
        cleaned_name = self.cleaned_data['name']
        if len(cleaned_name.strip()) == 0:
            msg = _('New name cannot be empty.')
            self._errors['name'] = self.error_class([msg])
        return cleaned_name

    def handle(self, request, data):
        volume_type_id = self.initial['id']
        try:
            cinder.volume_type_update(request,
                                      volume_type_id,
                                      data['name'],
                                      data['description'])
            message = _('Successfully updated volume type.')
            messages.success(request, message)
            return True
        except Exception as ex:
            redirect = reverse("horizon:admin:volumes:index")
            # BUGFIX: `ex.code` raised AttributeError for exceptions
            # without a `code` attribute, masking the real error; use
            # getattr like CreateVolumeType.handle does.
            if getattr(ex, 'code', None) == 409:
                error_message = _('New name conflicts with another '
                                  'volume type.')
            else:
                error_message = _('Unable to update volume type.')
            exceptions.handle(request, error_message,
                              redirect=redirect)
| |
# -*- coding: utf-8 -*-
import json
import requests
from django.conf import settings
from django.contrib.auth.models import User
from django.core.cache import cache
from django.db import models
from django.urls import reverse
from django.utils.safestring import mark_safe
from djforms.core.models import BINARY_CHOICES
from djforms.core.models import Department
from djforms.core.models import YEAR_CHOICES
from djtools.fields import NOW
from djtools.fields.validators import MimetypeValidator
from djtools.utils.mail import send_mail
from taggit.managers import TaggableManager
# Uploaded poster files must be PNG images
FILE_VALIDATORS = [MimetypeValidator('image/png')]
# (value, label) choices for the kind of work being presented
WORK_TYPES = (
    ('SURE', 'SURE'),
    ('Senior thesis', 'Senior thesis'),
    ('Independent research', 'Independent research'),
    ('Course project', 'Course project'),
    ("Master's thesis", "Master's thesis"),
)
# (value, label) choices for the presenter's role on campus
PRESENTER_TYPES = (
    ('', '----select----'),
    ('Student', 'Student'),
    ('Faculty', 'Faculty'),
    ('Staff', 'Staff'),
)
# Current year; used to partition poster uploads by year
YEAR = NOW.year
class Person(object):
    """
    Dynamic 'person' object.
    Usage:
    data = {"name":"larry","email":"larry@carthage.edu"}
    p = Person(**data)
    p.id = 90125
    etc
    """

    def __init__(self, **entries):
        """Store every keyword argument as an instance attribute."""
        for key, value in entries.items():
            setattr(self, key, value)
def get_json(yuri):
    """Obtain the json data from the directory API.

    Results are cached under '<yuri>_api_json'; on a cache miss the
    directory API is queried over HTTP and the decoded JSON is cached.
    (Dead code removed: an unused `data` local and stale commented-out
    parsing steps.)
    """
    jason = cache.get('{0}_api_json'.format(yuri))
    if jason is None:
        # read the json data from URL
        earl = "{0}{1}/api/json.txt?api_key={2}".format(
            settings.API_PEOPLE_URL, yuri, settings.API_KEY,
        )
        response = requests.get(earl)
        jason = json.loads(response.text)
        cache.set('{0}_api_json'.format(yuri), jason)
    return jason
def get_people(yuri):
    """Obtain the group of people from the directory API.

    Returns a dict mapping college ID (cid) -> Person, cached under
    '<yuri>_api_objects'.
    """
    cache_key = '{0}_api_objects'.format(yuri)
    people = cache.get(cache_key)
    if people is None:
        people = {}
        for entry in get_json(yuri):
            person = Person(**entry)
            people[person.cid] = person
        cache.set(cache_key, people)
    return people
class Presenter(models.Model):
    """Data model class for the presenter."""

    date_created = models.DateTimeField("Date Created", auto_now_add=True)
    date_updated = models.DateTimeField("Date Updated", auto_now=True)
    college_id = models.CharField(max_length=8, null=True, blank=True)
    first_name = models.CharField(max_length=128, null=True, blank=True)
    last_name = models.CharField(max_length=128, null=True, blank=True)
    email = models.CharField(max_length=128, null=True, blank=True)
    leader = models.BooleanField("Presentation leader", default=False)
    prez_type = models.CharField(
        "Presenter type",
        max_length=16,
        choices=PRESENTER_TYPES,
        null=True,
        blank=True,
    )
    college_year = models.CharField(
        "Current year at Carthage",
        max_length=1,
        choices=YEAR_CHOICES,
        null=True,
        blank=True,
    )
    major = models.CharField(max_length=128, null=True, blank=True)
    hometown = models.CharField(max_length=128, null=True, blank=True)
    # sponsor holds the faculty member's ID; name/email are denormalized
    # from the directory API in save()
    sponsor = models.CharField(max_length=128, null=True, blank=True)
    sponsor_name = models.CharField(max_length=128, null=True, blank=True)
    sponsor_email = models.CharField(max_length=128, null=True, blank=True)
    sponsor_other = models.CharField(max_length=255, null=True, blank=True)
    department = models.ForeignKey(
        Department,
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    mugshot = models.ImageField(
        max_length=255,
        upload_to="files/scholars/mugshots",
        help_text="75 dpi and .jpg only",
    )
    ranking = models.IntegerField(null=True, blank=True, default=0)

    def __str__(self):
        """Default value for the objects."""
        return '{0} {1}'.format(self.first_name, self.last_name)

    def save(self, *args, **kwargs):
        """Resolve sponsor name/email from the directory API, then save."""
        if self.sponsor:
            faculty = get_people("faculty")
            try:
                self.sponsor_name = '{0} {1}'.format(
                    faculty[self.sponsor].firstname,
                    faculty[self.sponsor].lastname,
                )
                self.sponsor_email = faculty[self.sponsor].email
            except Exception:
                # Unknown sponsor ID: fall back to the default contact
                self.sponsor_name = settings.COS_DEFAULT_NAME
                self.sponsor_email = settings.COS_DEFAULT_EMAIL
        # BUGFIX: pass *args/**kwargs through (e.g. using=, update_fields=)
        # instead of silently dropping them.
        super(Presenter, self).save(*args, **kwargs)

    def year(self):
        """Return the display label for the presenter's academic year."""
        if self.college_year:
            return YEAR_CHOICES[int(self.college_year)][1]
        return None

    def presenter_type(self):
        """Display the presenter type.

        BUGFIX: referenced undefined PRESENTOR_TYPES (NameError) and
        indexed the choices tuple with a string; map value -> label.
        """
        return dict(PRESENTER_TYPES).get(self.prez_type)
class Presentation(models.Model):
"""Data model class for the presentation."""
user = models.ForeignKey(
User,
verbose_name="Created by",
related_name='presentation_created_by',
on_delete=models.CASCADE,
)
updated_by = models.ForeignKey(
User,
verbose_name="Updated by",
related_name='presentation_updated_by',
editable=False,
on_delete=models.CASCADE,
)
date_created = models.DateTimeField("Date Created", auto_now_add=True)
date_updated = models.DateTimeField("Date Updated", auto_now=True)
ranking = models.IntegerField(null=True, blank=True, default=0)
title = models.CharField("Presentation title", max_length=255)
reviewer = models.CharField(max_length=128, null=True, blank=True)
leader = models.ForeignKey(
Presenter,
verbose_name="Presentation leader",
related_name='presentation_leader',
on_delete=models.CASCADE,
null=True,
blank=True,
)
presenters = models.ManyToManyField(
Presenter, related_name="presentation_presenters", blank=True,
)
funding = models.CharField(
"Funding source (if applicable)",
max_length=255,
help_text="e.g. external funding, SURE, etc.",
null=True,
blank=True,
)
work_type = models.CharField(max_length=32, choices=WORK_TYPES)
permission = models.CharField(
"Permission to reproduce",
max_length=3,
choices=BINARY_CHOICES,
help_text="""
Do you grant Carthage permission to reproduce your presentation?
""",
)
shared = models.CharField(
"Faculty sponsor approval",
max_length=3,
choices=BINARY_CHOICES,
help_text="""
Has your faculty sponsor approved your proposal?
Note: Faculty and staff presenters should choose 'yes'.
""",
)
abstract_text = models.TextField(
"Abstract",
help_text="Copy and paste your abstract text or start typing.",
)
need_table = models.CharField(max_length=3, choices=BINARY_CHOICES)
need_electricity = models.CharField(max_length=3, choices=BINARY_CHOICES)
poster_file = models.FileField(
upload_to='files/scholars/posters/{0}'.format(YEAR),
validators=FILE_VALIDATORS,
help_text="Upload a poster file",
null=True,
blank=True,
)
status = models.BooleanField(default=False)
    class Meta:
        """Sub-class for settings configurations about the parent class."""
        # Oldest records first by default; latest() also uses date_created.
        ordering = ['date_created']
        get_latest_by = 'date_created'
        # Custom permission used to gate the management views.
        permissions = (('manage_presentation', 'manage presentation'),)
def __str__(self):
"""Display the default value."""
return self.title
    def save(self, *args, **kwargs):
        """Override the save() method to update some things first."""
        # send email if approved
        if self.pk is not None:
            # Re-fetch the stored row to detect a False -> True status flip.
            prez = Presentation.objects.get(pk=self.pk)
            if (prez.status != self.status) and self.status:
                # In DEBUG, route the notification to the server address
                # instead of the real submitter.
                if settings.DEBUG:
                    TO_LIST = [settings.SERVER_EMAIL]
                else:
                    TO_LIST = [self.user.email,]
                BCC = settings.MANAGERS
                email = settings.DEFAULT_FROM_EMAIL
                subject = '[Celebration of Scholars] Presentation has been approved'
                # NOTE(review): this is a project-local send_mail (request,
                # recipients, subject, from, template, context, bcc) -- not
                # django.core.mail.send_mail; confirm against its definition.
                send_mail(
                    None,
                    TO_LIST,
                    subject,
                    email,
                    'scholars/presentation/approved_mail.html',
                    self,
                    BCC,
                )
        else:
            # First save only: record the creator as the last updater.
            # NOTE(review): updated_by is never refreshed on later saves --
            # confirm intended (date_updated does auto-update via auto_now).
            self.updated_by = self.user
        # NOTE(review): *args/**kwargs are accepted but dropped; super().save()
        # is always called with no arguments (e.g. update_fields is ignored).
        super(Presentation, self).save()
def get_absolute_url(self):
"""Return the default URL."""
return reverse('presentation_detail', kwargs={'pid': self.id})
def get_update_url(self):
"""Return the update URL."""
return reverse('presentation_update', kwargs={'pid': self.id})
def get_presenters(self):
"""Obtain all presenters for this presentation."""
return self.presenters.order_by('-leader', 'last_name')
def get_presenters_print(self):
"""Obtain all presenters for print."""
return self.presenters.order_by('last_name')
def mugshot_status(self):
"""Return the status if all the presenters have a mugshot or not."""
status = True
for presenter in self.presenters.all():
if not presenter.mugshot:
status = False
break
return status
def first_name(self):
"""Display the user's given name."""
return self.user.first_name
def last_name(self):
"""Display the user's sur name."""
return self.user.last_name
def email(self):
"""Display the user's email."""
return self.user.email
def sponsor(self):
"""Return the leader's sponsor email."""
if self.leader:
return self.leader.sponsor_email
else:
return None
def sponsor_other(self):
"""Return the leader's sponsor."""
if self.leader:
return self.leader.sponsor_other
else:
return None
def poster(self):
"""Return all of the posters."""
poster = False
if self.poster_file:
poster = mark_safe(
'<a href="https://{0}/assets/{1}">Download</a>'.format(
settings.SERVER_URL, self.poster_file,
),
)
return poster
poster.allow_tags = True
    def presentation_type(self):
        """Return the display label for this presentation's work type."""
        # NOTE(review): indexes WORK_TYPES (the same container used as the
        # field's choices) by the stored work_type string and takes element
        # [1]; this only works if WORK_TYPES is a mapping keyed by choice
        # value -- confirm against its definition.
        return WORK_TYPES[self.work_type][1]
| |
#!/usr/bin/python
#
# See https://github.com/MikeStitt/simple-locating/blob/master/license.txt for license.
import math
import cv2
import numpy as np
import scipy as Sci
import scipy.linalg
pi = math.pi
# Debug string naming which targets were matched on the last where() call.
debug_found = ''
# Position codes of the leftmost/rightmost classified targets (see where()).
leftmost = 0
rightmost = 0
# Two independent 3-angle pose estimates produced by where():
# azimuth in radians, east/south in inches (field frame).
az1 = 0.
east1 = 0.
south1 = 0.
az2 = 0.
east2 = 0.
south2 = 0.
#
# Convert from degrees to radians
#
def deg2rad( d ):
    """Convert an angle from degrees to radians."""
    full_turn_rad = 2.0 * math.pi
    return full_turn_rad * d / 360.0
# Step 0e
# Camera Field of View equations
#
# Camera x (width) Field of View
#
camera_x_fov_deg = 43.5 # degrees
camera_x_fov_rad = deg2rad( camera_x_fov_deg ) # radians
#
# Camera pixels in width (x) and height (y)
#
camera_x_pixels = 320.0 # pixels
camera_y_pixels = 240.0 # pixels
#
# Camera focal length in pixels.
# See http://en.wikipedia.org/wiki/Angle_of_view
#
# Use angle = 2 atan( d/(2f))
#
# This equation assumes d and angle are centered in the field of view.
#
# Solve angle = 2 atan( d/(2f ) for f:
# f = d / ( 2*tan(angle/2))
#
# With a 320-pixel width and a 43.5-degree horizontal FOV this works out
# to roughly 401 pixels.
camera_focal_len = camera_x_pixels / ( 2.0 * math.tan( camera_x_fov_rad / 2.0 )) # pixels
# Step 0f
# Use pixel location and camera focal length to turn the camera into
# a angle measuring device.
#
# Use the 1/2 angle form of angle = 2 atan( d/(2f))
# angle = atan( d/f )
#
# This equation assumes d and angle is from the center of the field of view.
#
def pixel2rad( pixel ):
    """Convert a pixel offset from the image center into an angle in radians.

    Uses the half-angle relation angle = atan(d / f) with the camera focal
    length in pixels; positive offsets map to up or right.
    """
    offset_ratio = pixel / camera_focal_len
    return math.atan( offset_ratio ) # + is up or right
#
# Camera height above ground
#
camera_height = 52.0 # inches
camera_initial_pitch_error = 0.0 # radians, + is pitched up
#
# Optical target dimensions
#
#target_width = 24.0 # inches
target_height = 18.0 # inches
#
# Order the constants so that left is least, low and top are in the middle, and right is highest
#
# Target position/level codes.  The numeric ordering matters: where()
# selects the leftmost/rightmost targets by comparing these values.
MID_LEFT = 0
LOW = 1
UNKNOWN = 2
MID_UNKNOWN = 3
TOP = 4
MID_RIGHT = 5
# Step 0d
#
# define a class to hold a table of where the targets are on the field
#
# Definition of field coordinate system.
#
# assumes we are shooting at the blue alliance target on the left edge of
# the field see: http://frc-manual.usfirst.org/viewItem/55#2.1
#
# south increases as we move to the right, towards the blue alliance station; 0 inches is at the blue backboards
# east increases as we move up towards the red kinect station; 0 inches is at the center of the top and bottom hoops
# up increases as we move off the ground; 0 inches is at the ground
#
# a heading of 0 is facing due north towards the targets; + radians is towards the right (east), - radians is
# towards the left (west)
#
class target_position:
    """Field-frame location of one backboard target, in inches.

    Records the target's edge coordinates and hoop height, and precomputes
    the center point of the reflective rectangle.
    """
    def __init__(self, l, r, t, b, h):
        self.left_inches = l          # east coordinate of the left edge
        self.right_inches = r         # east coordinate of the right edge
        self.top_inches = t           # up coordinate of the top edge
        self.bottom_inches = b        # up coordinate of the bottom edge
        self.hoop_height = h          # up coordinate of the hoop rim
        self.center_east = (l + r) / 2.0  # east coordinate of the center
        self.center_up = (t + b) / 2.0    # up coordinate of the center
#
# Height of hoop above ground
#
LOW_HOOP_UP = 28.0 # inches
MID_HOOP_UP = 61.0 # inches
TOP_HOOP_UP = 98.0 # inches
#
# Center of middle hoop
#
MID_LEFT_HOOP_EAST = -27.38 # inches
MID_RIGHT_HOOP_EAST = +27.38 # inches
#
# Target edges from center of hoop
#
TARGET_LEFT_DELTA = -12.0 #inches
TARGET_RIGHT_DELTA = +12.0 #inches
TARGET_TOP_DELTA = +20.0 #inches
TARGET_BOTTOM_DELTA = +2.0 #inches
# define a dictionary look up table for the target locations
# (keys are the position codes above; values are target_position records)
target_locs = { LOW: target_position( 0.0+TARGET_LEFT_DELTA, # l = left edge
                                      0.0+TARGET_RIGHT_DELTA, # r = right edge
                                      LOW_HOOP_UP+TARGET_TOP_DELTA, # t = top edge
                                      LOW_HOOP_UP+TARGET_BOTTOM_DELTA, # b = bottom edge
                                      LOW_HOOP_UP), # h = hoop height
                # Default an unknown middle level hoop to be the left hoop
                MID_UNKNOWN: target_position( MID_LEFT_HOOP_EAST+TARGET_LEFT_DELTA, # l = left edge
                                              MID_LEFT_HOOP_EAST+TARGET_RIGHT_DELTA, # r = right edge
                                              MID_HOOP_UP+TARGET_TOP_DELTA, # t = top edge
                                              MID_HOOP_UP+TARGET_BOTTOM_DELTA, # b = bottom edge
                                              MID_HOOP_UP), # h = hoop height
                MID_LEFT: target_position( MID_LEFT_HOOP_EAST+TARGET_LEFT_DELTA, # l = left edge
                                           MID_LEFT_HOOP_EAST+TARGET_RIGHT_DELTA, # r = right edge
                                           MID_HOOP_UP+TARGET_TOP_DELTA, # t = top edge
                                           MID_HOOP_UP+TARGET_BOTTOM_DELTA, # b = bottom edge
                                           MID_HOOP_UP), # h = hoop height
                MID_RIGHT: target_position( MID_RIGHT_HOOP_EAST+TARGET_LEFT_DELTA, # l = left edge
                                            MID_RIGHT_HOOP_EAST+TARGET_RIGHT_DELTA, # r = right edge
                                            MID_HOOP_UP+TARGET_TOP_DELTA, # t = top edge
                                            MID_HOOP_UP+TARGET_BOTTOM_DELTA, # b = bottom edge
                                            MID_HOOP_UP), # h = hoop height
                TOP: target_position( 0.0+TARGET_LEFT_DELTA, # l = left edge
                                      0.0+TARGET_RIGHT_DELTA, # r = right edge
                                      TOP_HOOP_UP+TARGET_TOP_DELTA, # t = top edge
                                      TOP_HOOP_UP+TARGET_BOTTOM_DELTA, # b = bottom edge
                                      TOP_HOOP_UP) } # h = hoop height
# state variables
# Current camera pitch correction (radians); starts at the initial estimate.
camera_pitch_error = camera_initial_pitch_error
class target:
    """One detected backboard rectangle and its derived angle/pose estimates.

    The *_pixels fields are image coordinates from the detector; the *_rad
    fields and the distance/height/level estimates are filled in by
    est_initial_angles().
    """
    #
    # Step 1:
    # When we find a target record where we found the edges in pixels:
    #
    def __init__(self,l,r,t,b):
        self.left_pixels = l
        self.right_pixels = r
        self.top_pixels = t
        self.bottom_pixels = b
        self.pos = UNKNOWN
    #
    # Step 2:
    # Convert the pixel locations to angles from the center line of the camera:
    #
    def est_initial_angles(self):
        """Derive edge angles, azimuth/elevation, a first distance and height
        estimate, and classify this target's level from its height."""
        self.left_rad = pixel2rad( self.left_pixels - camera_x_pixels / 2.0 )
        self.right_rad = pixel2rad( self.right_pixels - camera_x_pixels / 2.0 )
        self.top_rad = pixel2rad( self.top_pixels - camera_y_pixels / 2.0 )
        self.bottom_rad = pixel2rad( self.bottom_pixels - camera_y_pixels / 2.0 )
        #
        # Step 3:
        # Azimuth is left to right angle from the center line of the camera. +angles are to the right.
        # Elevation is down to up angle from the center line of the camera. +angles are up.
        #
        # Estimate the Azimuth and Elevation from the camera to the center of the target.
        #
        self.azimuth_rad = (self.left_rad + self.right_rad) / 2.0 # + is right
        self.elevation_rad = (self.top_rad + self.bottom_rad) / 2.0 - camera_pitch_error # + is up
        #
        # Step 4:
        # Initial estimate of the distance to this target based upon the vertical degrees this target takes in the
        # field of view.
        #
        self.dist_est_1 = target_height / ( math.tan(self.top_rad+camera_pitch_error)-math.tan(self.bottom_rad+camera_pitch_error) )
        #
        # Step 5:
        # Initial estimate of the height of the center of this target above ground based upon the
        # distance to the target, the angle of the target, and the camera height
        #
        self.height_est_1 = self.dist_est_1 * math.tan(self.elevation_rad) + camera_height
        #
        # Step 6:
        # Classify the target as a low, middle or top target based upon its height above ground.
        # The thresholds are the midpoints between the LOW/MID and MID/TOP
        # target center heights (39, 72 and 109 inches).
        #
        if ( self.height_est_1 < 55.5 ):
            self.level = LOW
        elif ( self.height_est_1 < 90.5 ):
            self.level = MID_UNKNOWN
        else:
            self.level = TOP
    #
    # Step 8:
    # Given the minimum azimuth (most left target), and maximum azimuth (most right target)
    # if we have identified more than one target, classify the middle level targets as the
    # left or the right middle.
    #
    def classify_pos( self, min_az, max_az ):
        """Resolve a MID_UNKNOWN level into MID_LEFT/MID_RIGHT using the
        extreme azimuths of all detected targets."""
        if self.level == MID_UNKNOWN:
            if min_az == max_az:
                self.pos = MID_UNKNOWN
            elif self.azimuth_rad == min_az:
                self.pos = MID_LEFT
            elif self.azimuth_rad == max_az:
                self.pos = MID_RIGHT
            else:
                self.pos = MID_UNKNOWN # should not reach this line, because if we
                                       # found a mid and another target, the mid
                                       # should be min_az or max_az
        else:
            self.pos = self.level
#Step 11
# Given 3 camera angles to 3 vertical lines along the wall of backboards, estimate the
# camera heading (azimuth), east position, and south position
#
# See https://github.com/MikeStitt/simple-locating-docs/blob/master/mathToFindLocationFromAnglesTo3PointsOnALine.pdf?raw=true
#
#
def estimate_pos_3_sep_hrz_angles( left_rad, mid_rad, right_rad, left_pos, mid_pos, right_pos ):
    """Triangulate the camera pose from angles to three points on a line.

    left_rad/mid_rad/right_rad: camera azimuths (radians) to three vertical
    target edges, left to right.  left_pos/mid_pos/right_pos: the east
    coordinates (inches) of those edges.  Returns a tuple
    (azimuth, east, south) for the camera in the field frame.  See the
    linked PDF for the derivation of the symbols below.
    """
    # Angular separations between adjacent sightings.
    a0 = mid_rad - left_rad
    a1 = right_rad - mid_rad
    # East distances between adjacent sighted edges.
    b0 = mid_pos - left_pos
    b1 = right_pos - mid_pos
    A = math.atan2( -b0 , ( (b1/math.tan(a1)) - (b1+b0)/math.tan(a1+a0)))
    ak = pi/2-A
    alpha_k = pi/2-ak
    alpha_1 = pi/2-ak-a1
    # k: offset along the wall from the right point; d: perpendicular range.
    k = b1 * math.tan(alpha_1) / (math.tan(alpha_k)-math.tan(alpha_1))
    d = k * math.tan(alpha_k)
    # ( azimuth, east, south )
    return (-(right_rad+ak-pi), right_pos+k, d )
#
# Given a list of found rectangles,
# invoke steps 2 through 12 on the rectangles
#
def where( rectangles ):
    """Estimate the camera pose from a list of target rectangles.

    Returns (azimuth_rad, east_inches, south_inches), or
    (-1000*pi, -1000, -1000) when fewer than two usable targets were found.
    Also updates the module-level debug/estimate globals as a side effect.
    """
    global debug_found
    global leftmost, rightmost, az1, east1, south1, az2, east2, south2
    # Invoke steps 2 through 6 for each target found
    #
    for r in rectangles:
        r.est_initial_angles()
    # Step 7
    # Find the center target azimuth that is most left and most right
    #
    min_azimuth = +pi # start at +180 which is out of view to right
    max_azimuth = -pi # start at -180 which is out of view to left
    for r in rectangles:
        min_azimuth = min( min_azimuth, r.azimuth_rad )
        max_azimuth = max( max_azimuth, r.azimuth_rad )
    # Invoke step 8 for each target
    #
    for r in rectangles:
        r.classify_pos( min_azimuth, max_azimuth )
    # For debugging purposes identify the rectangles we found
    #
    ml = '--'
    mr = '--'
    bt = '--'
    tp = '--'
    mu = '--'
    for r in rectangles:
        if r.pos == LOW:
            bt = 'BT'
        elif r.pos == MID_UNKNOWN:
            mu = 'MU'
        elif r.pos == MID_LEFT:
            ml = 'ML'
        elif r.pos == MID_RIGHT:
            mr = 'MR'
        elif r.pos == TOP:
            tp = 'TP'
    debug_found = '{0:s}{1:s}{2:s}{3:s}{4:s}'.format( ml, bt, tp, mr, mu )
    # Step 9
    # Identify the left most and right most targets.
    # (Position codes are ordered, so plain comparisons find the extremes.)
    leftmost = MID_RIGHT+1
    rightmost = MID_LEFT-1
    for r in rectangles:
        if r.pos < leftmost :
            leftmost = r.pos
            left = r
        if r.pos > rightmost :
            rightmost = r.pos
            right=r
    # Step 10
    # If we have found two different targets, and they are not just the top and bottom targets.
    # Then perform step 11 on two sets of 3 angles to the 3 targets.
    # (LOW and TOP alone are excluded: they are vertically stacked, so their
    # edges do not give three horizontally separated lines.)
    #
    if (leftmost != MID_RIGHT+1) and (leftmost != rightmost) and not((leftmost==LOW) and (rightmost==TOP)) :
        # take two estimates of position with 3 angles
        # Perform step 11 using both vertical edges of the far left target
        # and the right edge of the far right target.
        #
        # See definition of field coordinate system.
        # az1 and az2 are estimates of the camera heading in azimuth in radians
        # east1 and east2 are estimates of the camera east position
        # south1 and south2 are estimates of the camera south position
        #
        az1, east1, south1 = estimate_pos_3_sep_hrz_angles( left.left_rad,
                                                            left.right_rad,
                                                            right.right_rad,
                                                            target_locs[left.pos].left_inches,
                                                            target_locs[left.pos].right_inches,
                                                            target_locs[right.pos].right_inches)
        # Perform step 11 using the left vertical edge of the far left target
        # and both vertical edges of the far right target.
        az2, east2, south2 = estimate_pos_3_sep_hrz_angles( left.left_rad,
                                                            right.left_rad,
                                                            right.right_rad,
                                                            target_locs[left.pos].left_inches,
                                                            target_locs[right.pos].left_inches,
                                                            target_locs[right.pos].right_inches)
        #
        # Step 12.
        # Average the two passes of passes for an estimate of the camera position and heading
        #
        return ( (az1+az2)/2.0, (east1+east2)/2.0, (south1+south2)/2.0 )
    else:
        # Sentinel pose: not enough separated targets to triangulate.
        return( -1000*pi, -1000, -1000 )
#
# Step 13.
# For a target rectangle calculate the azimuth offset from the center of the
# center of the backboard to the center of the hoop.
#
def target_backboard_az_and_az_offset( target, east, south ):
    """Return (azimuth to backboard center, extra azimuth to hoop center).

    Angles are radians in the field frame; east/south are the camera
    position in inches.
    """
    # The hoop center sits 15 inches south of the backboard face.
    target_east = target_locs[target.pos].center_east
    target_south = -15.0 # inches
    backboard_center_az = math.atan2(target_east-east,-south)
    target_center_az = math.atan2(target_east-east,target_south-south)
    offset = target_center_az - backboard_center_az
    return backboard_center_az, offset
#
# Step 14.
# For a target rectangle calculate the range along the floor from the center
# of the camera to the center of the hoop.
#
def target_range( target, east, south ):
    """Return the floor distance (inches) from the camera to the hoop center.

    The hoop center is 15 inches south of the target's backboard face;
    east/south are the camera position in inches.
    """
    target_east = target_locs[target.pos].center_east
    target_south = -15.0
    # math.hypot is the standard, overflow-safe form of sqrt(dx^2 + dy^2).
    return math.hypot(target_east - east, target_south - south)
| |
from __future__ import division, print_function
import numpy as np
from scipy.signal import fftconvolve, convolve
import itertools
"""
08/31/17
Author: Rex McArthur
Creates a class of n-dim Chebyshev polynomials. Tracks the leading term,
coefficients, and includes basic operations (+, *, scalar multiplication, etc.)
Assumes GRevLex ordering, but should be extended.
"""
class GrevlexGen(object):
    """
    Generator of n-dimensional exponent vectors in graded reverse
    lexicographic (grevlex) order, plus comparison and arithmetic helpers
    for Chebyshev polynomial objects.

    _____ params _______
    dim: int, number of variables, dimension of the chebyshev system
    shape: tuple, extent of the coefficient tensor in each dimension
    order: string, monomial ordering desired for Groebner calculations
    lead_term: list, the index of the current leading coefficient
    _____ methods ______
    next_step:
        input- current: array, current location in the ordering
        output- the next step in the ordering

    NOTE(review): update_lead_term(), __lt__/__gt__ and __mul__ read
    self.coeff, which this class never assigns, and __add__/__sub__/__mul__
    build MultiCheb objects defined elsewhere -- confirm intended usage.
    """
    def __init__(self, dim, shape, order='grevlex', lead_term=None):
        '''
        dim, int - number of different variables (tensor dimensionality)
        shape, tuple - number of Chebyshev terms along each dimension
        order, string - monomial ordering; grevlex is the default
        lead_term - optional starting leading-term index
        '''
        self.dim = dim
        self.shape = shape
        # Store the remaining constructor arguments (the original dropped
        # them, which made update_lead_term fail on self.order).
        self.order = order
        self.lead_term = lead_term
        # Highest single-variable exponent representable in the tensor.
        self.max_term = np.max(self.shape) - 1
    def next_step(self, current):
        '''
        Return the exponent vector that follows *current* within the same
        total degree, mutating *current* in place.
        '''
        # Scan from the last axis backwards for a unit of degree to shift.
        for i in range(self.dim - 1, 0, -1):
            i = int(i)
            if i != self.dim - 1 and current[i] == 0:
                break
            elif i != 0 and current[i] < self.max_term and current[i - 1] > 0:
                # Move one unit of degree from axis i-1 onto axis i.
                current[i] += 1
                current[i - 1] -= 1
                return current
        if len(current.nonzero()) > 0:
            # NOTE(review): ndarray.nonzero() returns a tuple of self.dim
            # arrays, so this condition is always true; kept as in the
            # original (it was flagged there as "the problem").
            first_z = -1 * next(j for j, v in enumerate(current[::-1]) if v == 0) - 1
            # Find the first non-zero after a zero and iterate from there to
            # create the next high state.
            first_nz_after_z = -1 * next(i for i, v in enumerate(current[first_z::-1]) if v != 0) - 1
            j = first_z + first_nz_after_z + 1
            current[j] -= 1
            current[j + 1:] = self._calc_high_state(current[j + 1:], self.state_sum - np.sum(current[:j + 1]))
            return current
        raise ValueError("Condition not covered in step func")
    def grevlex_gen(self, current=None):
        '''
        Yield exponent vectors in descending grevlex order, finishing with
        the zero vector.

        #TODO: Currently this is requiring square matrices; make it work for
        non-square shapes as well.
        '''
        self.state_sum = sum(np.array(self.shape) - 1)
        # Identity check: `current == None` broke for ndarray arguments
        # (elementwise comparison is ambiguous in a boolean context).
        if current is None:
            current = np.array(self.shape) - 1
        low_state = self._calc_low_state(current)
        self.state_sum = np.sum(current)
        last_i = np.zeros_like(current)
        last_i[-1] = 1
        yield current
        while True:
            if all(current == last_i):
                # (0, ..., 0, 1) is the last non-zero state: emit the zero
                # vector and stop.
                yield np.zeros_like(current)
                return
            elif all(current == low_state):
                # This total degree is exhausted; drop to the next one.
                self.state_sum -= 1
                current = self._calc_high_state(current, self.state_sum)
                low_state = self._calc_low_state(current)
                yield current
            else:
                current = self.next_step(current)
                yield current
    def _calc_low_state(self, current):
        """Return the grevlex-smallest vector of the current total degree
        (degree packed onto the trailing axes)."""
        max_term = np.max(self.shape) - 1
        if self.state_sum < max_term:
            low_state = np.zeros_like(current)
            low_state[-1] = self.state_sum
            return low_state
        else:
            # Fill whole trailing slots with max_term, then the remainder.
            slots = int(self.state_sum // max_term)
            remainder = self.state_sum % max_term
            low_state = np.zeros_like(current)
            low_state[-slots:] = (self.shape[0] - 1) * np.ones(1)
            if remainder != 0:
                low_state[-slots - 1] = remainder
            return low_state.astype(int)
    def _calc_high_state(self, current, sum_val):
        """Return the grevlex-largest vector with total degree *sum_val*
        (degree packed onto the leading axes)."""
        max_term = np.max(self.shape) - 1
        slots = int(sum_val // max_term)
        remainder = sum_val % max_term
        high_state = np.zeros_like(current)
        high_state[:slots] = (max_term) * np.ones(1)
        if remainder != 0:
            high_state[slots] = remainder
        return high_state.astype(int)
    def update_lead_term(self, start=None):
        """Walk the grevlex ordering and record the first non-zero entry of
        self.coeff as the leading term and coefficient.

        NOTE(review): requires self.coeff, which must be attached by a
        subclass or caller; this class does not create it.
        """
        if self.order == 'grevlex':
            gen = self.grevlex_gen()
            for idx in gen:
                if self.coeff[tuple(idx)] != 0:
                    self.lead_term = idx
                    self.lead_coeff = self.coeff[tuple(idx)]
                    break
    def __lt__(self, other):
        '''
        Grevlex comparison on leading terms; the leading coefficient value
        breaks exact ties.
        '''
        if sum(self.lead_term) < sum(other.lead_term):
            return True
        elif sum(self.lead_term) > sum(other.lead_term):
            return False
        else:
            # range() replaces Python-2-only xrange().
            for i in range(len(self.lead_term)):
                if self.lead_term[i] < other.lead_term[i]:
                    return True
                if self.lead_term[i] > other.lead_term[i]:
                    return False
            if self.coeff[tuple(self.lead_term)] < other.coeff[tuple(other.lead_term)]:
                return True
            # Explicit False replaces the original's implicit None (falsy).
            return False
    def __gt__(self, other):
        '''
        Grevlex comparison on leading terms (greater-than direction).
        '''
        if sum(self.lead_term) < sum(other.lead_term):
            return False
        elif sum(self.lead_term) > sum(other.lead_term):
            return True
        else:
            for i in range(len(self.lead_term)):
                if self.lead_term[i] < other.lead_term[i]:
                    return False
                if self.lead_term[i] > other.lead_term[i]:
                    return True
            # NOTE(review): the original returned a falsy value here even
            # when self's coefficient was larger; preserved (False) to keep
            # truthiness identical -- confirm whether a real tie-break on
            # the coefficient was intended.
            return False
    def __add__(self, other):
        '''
        Coefficient-wise addition of two polynomials.
        '''
        return MultiCheb(self.coeff + other.coeff)
    def __sub__(self, other):
        '''
        Coefficient-wise subtraction of two polynomials.
        '''
        return MultiCheb(self.coeff - other.coeff)
    def match_size(self, a, b):
        '''
        Zero-pad two polynomials so their coefficient tensors share a shape.
        '''
        # izip_longest was renamed zip_longest in Python 3.
        try:
            zip_longest = itertools.zip_longest
        except AttributeError:
            zip_longest = itertools.izip_longest
        new_shape = [max(i, j) for i, j in zip_longest(a.shape, b.shape)]
        add_a = [i - j for i, j in zip(new_shape, a.shape)]
        add_b = [i - j for i, j in zip(new_shape, b.shape)]
        add_a_list = np.zeros((2, len(new_shape)))
        add_b_list = np.zeros((2, len(new_shape)))
        # Pad only on the trailing side of each axis.
        add_a_list[:, 1] = add_a
        add_b_list[:, 1] = add_b
        a = MultiCheb(np.pad(a.coeff, add_a_list.astype(int), 'constant'))
        b = MultiCheb(np.pad(b.coeff, add_b_list.astype(int), 'constant'))
        return a, b
    def __mul__(self, other):
        '''
        Chebyshev product via convolution.
        CURRENTLY ONLY DOING 2-D support
        Manually make 1, 3D support then add n-dim support

        Uses T_m*T_n = (T_{m+n} + T_{|m-n|})/2: a plain convolution gives
        the T_{m+n} part; a flipped convolution folded back onto the
        positive quadrant gives the T_{|m-n|} part.
        '''
        # Match shapes first if needed.
        if self.shape != other.shape:
            new_self, new_other = self.match_size(self, other)
        else:
            new_self, new_other = self, other
        c = new_other.coeff[::-1, ::-1]
        p1 = convolve(new_self.coeff, new_other.coeff)
        temp = convolve(new_self.coeff, c)
        half = len(p1) // 2
        # Fold the flipped product back across each axis in turn.
        p2 = temp[:half + 1, :][::-1] + temp[half:, :]
        p2[0, :] = p2[0, :] / 2.
        p2 = p2[:, :half + 1][:, ::-1] + p2[:, half:]
        p2[:, 0] = p2[:, 0] / 2.
        p_z = np.zeros_like(p1)
        p_z[:half + 1, :half + 1] = p2
        new_coeff = .5 * (p1 + p_z)
        # Round away convolution noise.  (The original's debug prints and
        # blocking raw_input() call were removed.)
        new_coeff = np.around(new_coeff, 6)
        #TODO: You can use the lead_term kwarg to save some time
        return MultiCheb(new_coeff)
| |
#! /usr/bin/env python3
###############
###IMPORTS###
###############
# LIBRARIES
import datetime
import multiprocessing
import os
import random
import shutil
import string
import sys
import tempfile
import time
from queue import Queue
from threading import Thread
# OTHER SCRIPTS
import arguments as arguments
import collectOnly as collect
import consensusIAssembler as consensus
import dirsAndFiles as logistic
import evmPipeline
import getRightStrand as grs
import handlers as handler
import interproscan as iprscan
import manipulateSeq as mseq
import mapping
import multithreadLargeFasta as multiple
import pasa as pasa
import prepareEvmInputs as inputEvm
import reduceUTRs as utrs
import transcriptAssembly as transcripts
import update as update
###############
###MAIN###
###############
def main():
fmtdate = '%H:%M:%S %d-%m'
now = datetime.datetime.now().strftime(fmtdate)
home = os.path.expanduser("~")
args = arguments.setting()
if args.pasa_db == "":
pasadb = ''.join(random.sample(string.ascii_lowercase, 5))
else:
pasadb = args.pasa_db
augustus_species = logistic.augustus_species_func()
if not augustus_species.get(args.species) and args.long_reads == "" and args.short_reads == "":
sys.exit("#####PLEASE DEFINE A SPECIES NAME OR ANY KIND OF RNA-SEQ AND RE-RUN\t" + now + "\t#####\n")
max_threads = multiprocessing.cpu_count()
gmap_name = args.reference + '_GMAPindex'
pasa_name = 'assembler-' + pasadb
if args.upgrade == "":
protein_loc = os.path.abspath(args.proteins)
iprscan_log = iprscan.check_iprscan()
# Useful variables for later
root = os.getcwd()
#if args.out_dir != "":# and args.out_dir.startswith("/"):
# output_dir = os.path.join(root, "LoReAn" + args.out_dir)
#else:
output_dir = os.path.join(root, "LoReAn_" + args.out_dir)
logistic.check_create_dir(output_dir)
if args.keep_tmp or args.verbose:
wd = os.path.join(output_dir, "run/")
logistic.check_create_dir(wd)
else:
temp_dir = tempfile.TemporaryDirectory(prefix='run_', dir=output_dir, suffix="/", )
wd = temp_dir.name
if args.upgrade == "":
#if not os.path.isfile(home + "/.gm_key"):
# sys.exit("#####LOREAN STOPS HERE. CHECK THAT THE gm_key IS IN THE HOME FOLDER#####\n")
if args.proteins == "":
if not args.keep_tmp or not args.verbose:
shutil.rmtree(wd)
sys.exit("#####LOREAN STOPS HERE. CHECK THAT THE PROTEIN OPTION IS SET#####\n")
if args.long_reads != "":
if args.stranded or args.adapter:
if args.adapter == '':
adapter_value = True
sys.stdout.write('### RUNNING IN STRAND MODE AND FINDING ADAPTER AUTOMATICALLY ###\n')
stranded_value = True
else:
adapter_value = args.adapter
sys.stdout.write('### RUNNING IN STRAND MODE AND USING ADAPTER PROVIDED ###\n')
stranded_value = True
else:
stranded_value = False
sys.stdout.write('### RUNNING IN NON-STRAND MODE ###\n')
adapter_value = False
ref_orig = os.path.abspath(args.reference)
ref_link = os.path.join(wd, args.reference)
if not os.path.exists(ref_link):
shutil.copyfile(ref_orig, ref_link)
long_reads = args.long_reads
fasta = (".fasta", ".fa", ".fas", ".fsta")
fastq = (".fastq", ".fq")
'''Core of the program'''
# Parse the arguments
if int(args.threads) > max_threads:
threads_use = str(max_threads)
sys.stdout.write(('### MAX NUMBER OF USED THREADS IS ' + str(max_threads) + ' AND NOT ' + args.threads + ' AS SET ###\n'))
else:
threads_use = args.threads
if args.external:
external_file = args.external
else:
external_file = ''
if args.upgrade == "":
if args.species == "":
sys.exit("#####PLEASE DEFINE A SPECIES NAME\t" + now + "\t#####\n")
else:
if args.short_reads == '' and long_reads == '':
if external_file.endswith("gff3") or external_file.endswith(fasta):
weights_dic = {'Augustus': args.augustus_weigth, 'GeneMark.hmm': args.genemark_weigth, 'exonerate': args.exonerate_weigth,
'external' : args.external_weigth}
else:
weights_dic = {'Augustus': args.augustus_weigth, 'GeneMark.hmm': args.genemark_weigth, 'exonerate': args.exonerate_weigth}
elif args.short_reads != '' or long_reads != '':
if external_file.endswith("gff3") or external_file.endswith(fasta):
weights_dic = {'Augustus': args.augustus_weigth, pasa_name: args.pasa_weigth, 'GeneMark.hmm': args.genemark_weigth,
'exonerate': args.exonerate_weigth, gmap_name: args.trinity_weigth, 'external' : args.external_weigth}
else:
weights_dic = {'Augustus': args.augustus_weigth, pasa_name: args.pasa_weigth, 'GeneMark.hmm': args.genemark_weigth,
'exonerate': args.exonerate_weigth, gmap_name: args.trinity_weigth}
final_files = [] # STORE THE IMPORTANT OUTPUT FILES
logistic.check_create_dir(wd)
logistic.check_file(ref_link)
gmap_wd = os.path.join(wd ,'gmap_output/')
exonerate_wd = os.path.join(wd , 'exonerate')
pasa_dir = os.path.join(wd , 'PASA/')
star_out = os.path.join(wd , 'STAR/')
trin_dir = os.path.join(wd , 'Trinity/')
evm_inputs_dir = os.path.join(wd , 'evm_inputs/')
braker_folder = os.path.join(wd , 'braker/')
evm_output_dir = os.path.join(wd , 'evm_output/')
interproscan_out_dir = os.path.join(wd , 'interproscan')
wd_split = os.path.join(wd , 'split/')
logistic.check_create_dir(wd_split)
logistic.check_create_dir(evm_inputs_dir)
logistic.check_create_dir(evm_output_dir)
logistic.check_create_dir(trin_dir)
logistic.check_create_dir(star_out)
logistic.check_create_dir(pasa_dir)
logistic.check_create_dir(gmap_wd)
logistic.check_create_dir(exonerate_wd)
if args.interproscan:
logistic.check_create_dir(interproscan_out_dir)
if long_reads:
consensus_wd = os.path.join(wd , 'consensus/')
logistic.check_create_dir(consensus_wd)
if long_reads != "" or args.short_reads != "":
logistic.check_gmap(threads_use, 'samse', args.min_intron_length, args.max_intron_length, args.end_exon, gmap_wd,
args.verbose)
if args.repeat_masked != "":
sys.stdout.write(('###MASKING THE GENOME STARTED AT:\t' + now + '\t###\n'))
masked_ref = mseq.maskedgenome(wd_split, ref_link, args.repeat_masked, args.repeat_lenght, args.verbose)
elif args.mask_genome:
sys.stdout.write(('###RUNNNG REPEATSCOUT AND REPEATMASK TO MASK THE GENOME STARTED AT:\t' + now + '\t###\n'))
masked_ref, repeats_families, repeats_gff = mseq.repeatsfind(ref_link, wd_split, threads_use, args.verbose)
if os.path.exists(repeats_families):
final_files.append(repeats_families)
if os.path.exists(repeats_gff):
final_files.append(repeats_gff)
else:
masked_ref = ref_link
list_fasta_names, dict_ref_name, ref_rename = multiple.single_fasta(masked_ref, wd_split)
if args.short_reads or long_reads:
if int(threads_use) > 1:
trinity_cpu = int(int(threads_use) / int(2))
else:
trinity_cpu = int(threads_use)
now = datetime.datetime.now().strftime(fmtdate)
# SHORT READS
if args.short_reads.endswith(fastq):
sys.stdout.write(('###STAR MAPPING STARTED AT:\t' + now + '\t###\n'))
if ',' in args.short_reads:
paired_end_files = args.short_reads.split(',')
short_1 = os.path.abspath(paired_end_files[0])
short_2 = os.path.abspath(paired_end_files[1])
short_reads_file = [short_1, short_2]
else:
short_reads_file = os.path.abspath(args.short_reads)
# Map with STAR
short_bam = mapping.star(ref_rename, short_reads_file, threads_use, args.max_intron_length, star_out,
args.verbose)
short_sorted_bam = mapping.samtools_sort(short_bam, threads_use, wd, args.verbose)
final_mapping_star = mapping.change_chr(short_sorted_bam, dict_ref_name, star_out, threads_use, args.verbose, "short")
default_bam = short_sorted_bam
# Keep the output
final_files.append(final_mapping_star)
# TRANSCRIPT ASSEMBLY
# TRINITY
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###TRINITY STARTS AT:\t' + now + '\t###\n'))
trinity_out = transcripts.trinity(short_sorted_bam, trin_dir, args.max_intron_length, trinity_cpu, args.verbose)
if args.upgrade == "":
trinity_gff3 = mapping.gmap('trin', ref_rename, trinity_out, threads_use, 'gff3_gene',
args.min_intron_length, args.max_intron_length, args.end_exon, gmap_wd,
args.verbose, Fflag=True)
trinity_path = trinity_gff3
long_sorted_bam = False
# BAM SORTED FILES GET IN HERE
elif args.short_reads.endswith("bam") or long_reads.endswith("bam"):
logistic.check_create_dir(star_out)
if args.short_reads.endswith("bam"):
map_reads = os.path.abspath(args.short_reads)
short_sorted_bam = mapping.change_chr_to_seq(map_reads, dict_ref_name, star_out, threads_use, args.verbose)
else:
map_reads = os.path.abspath(long_reads)
short_sorted_bam = mapping.change_chr_to_seq(map_reads, dict_ref_name, star_out, threads_use, args.verbose)
mapping.samtools_index(short_sorted_bam, star_out, args.verbose)
long_reads = transcripts.bamtofastq(short_sorted_bam, args.verbose)
#short_sorted_bam = os.path.abspath(args.short_reads)
default_bam = short_sorted_bam
# TRANSCRIPT ASSEMBLY
# TRINITY
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###TRINITY STARTS AT:\t' + now + '\t###\n'))
trinity_out = transcripts.trinity(short_sorted_bam, trin_dir, args.max_intron_length, trinity_cpu, args.verbose)
if args.upgrade == "":
trinity_gff3 = mapping.gmap('trin', ref_rename, trinity_out, threads_use, 'gff3_gene',
args.min_intron_length, args.max_intron_length, args.end_exon, gmap_wd,
args.verbose, Fflag=True)
trinity_path = trinity_gff3
long_sorted_bam = False
# LONG READS
elif long_reads.endswith(fastq) or long_reads.endswith(fasta):
# with this operation, reads are filtered for their length.
# Nanopore reads can be chimeras or sequencing artefacts.
# filtering on length reduces the amount of sequencing
# artefacts
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(("###FILTERING OUT LONG READS STARTED AT:\t" + now + "\t###\n"))
long_fasta, stranded_value = mseq.filterLongReads(long_reads, args.assembly_overlap_length, args.max_long_read, gmap_wd,
adapter_value, threads_use, args.adapter_match_score, ref_rename,
args.max_intron_length, args.verbose, stranded_value)
# If short reads have been mapped dont do it
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###GMAP\t' + now + 't###\n'))
if args.minimap2:
long_sam = mapping.minimap(ref_rename, long_fasta, threads_use, args.max_intron_length, gmap_wd, args.verbose)
else:
long_sam = mapping.gmap('sam', ref_rename, long_fasta, threads_use, 'samse',
args.min_intron_length, args.max_intron_length, args.end_exon, gmap_wd,
args.verbose, Fflag=False)
# Convert to sorted BAM
long_sorted_bam = mapping.sam_to_sorted_bam(long_sam, threads_use, gmap_wd, args.verbose)
sam_orig_id = mapping.change_chr(long_sorted_bam, dict_ref_name, gmap_wd, threads_use, args.verbose, "long")
default_bam = long_sorted_bam
# Keep the output
final_files.append(sam_orig_id)
# TRANSCRIPT ASSEMBLY
# TRINITY
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###TRINITY STARTS AT:\t' + now + '\t###\n'))
trinity_out = transcripts.trinity(long_sorted_bam, trin_dir, args.max_intron_length, trinity_cpu, args.verbose)
if args.upgrade == "":
trinity_gff3 = mapping.gmap('trin', ref_rename, trinity_out, threads_use, 'gff3_gene',
args.min_intron_length, args.max_intron_length, args.end_exon, gmap_wd,
args.verbose, Fflag=True)
trinity_path = trinity_gff3
else:
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###NO LONG READS FILE OR SHORT READS\t' + now + '\t###\n'))
# PASA Pipeline
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###PASA STARTS AT:\t' + now + '\t###\n'))
# Create PASA folder and configuration file
#align_pasa_conf = pasa.pasa_configuration(pasa_dir, pasadb, args.verbose)
# Launch PASA
if args.upgrade == "":
#if os.path.isfile(home + "/.gm_key") and args.proteins != "":
if args.proteins != "":
pasa_gff3 = pasa.pasa_call(pasa_dir, pasadb, ref_rename, trinity_out, args.max_intron_length,
threads_use, args.verbose)
final_files.append(grs.trasform_gff(pasa_gff3, dict_ref_name))
# HERE WE PARALLELIZE PROCESSES WHEN MULTIPLE THREADS ARE USED
if args.species in augustus_species:
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###AUGUSTUS, GENEMARK-ES AND EXONERATE STARTED AT:' + now + '\t###\n'))
queue = Queue()
for software in range(3):
queue.put(software) # QUEUE WITH A ZERO AND A ONE
for software in range(3):
t = Thread(target=handler.august_gmes_exonerate, args=(queue, ref_rename, args.species, protein_loc,
threads_use, args.fungus, list_fasta_names, wd, exonerate_wd,
args.verbose))
t.daemon = True
t.start()
queue.join()
augustus_file = wd + 'augustus/augustus.gff'
augustus_gff3 = inputEvm.convert_augustus(augustus_file, wd)
final_files.append(grs.trasform_gff(augustus_gff3, dict_ref_name))
genemark_file = wd + 'gmes/genemark.gtf'
genemark_gff3 = inputEvm.convert_genemark(genemark_file, wd)
final_files.append(grs.trasform_gff(genemark_gff3, dict_ref_name))
merged_prot_gff3 = wd + 'exonerate/protein_evidence.gff3'
final_files.append(grs.trasform_gff(merged_prot_gff3, dict_ref_name))
elif args.short_reads or long_reads: # USING PROTEINS AND SHORT READS
logistic.check_create_dir(braker_folder)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###BRAKER1 (USING SHORT READS) AND EXONERATE STARTED AT:\t' + now + '\t###\n'))
queue = Queue()
for software in range(2):
queue.put(software) # QUEUE WITH A ZERO AND A ONE
for software in range(2):
t = Thread(target=handler.braker_exonerate, args=(queue, ref_rename, default_bam, args.species, protein_loc,
threads_use, args.fungus, wd,
braker_folder, exonerate_wd, args.verbose))
t.daemon = True
t.start()
queue.join()
augustus_file, genemark_file = inputEvm.braker_folder_find(braker_folder)
augustus_gff3 = inputEvm.convert_augustus(augustus_file, wd)
genemark_gff3 = inputEvm.convert_genemark(genemark_file, wd)
merged_prot_gff3 = wd + 'exonerate/protein_evidence.gff3'
final_files.append(grs.trasform_gff(augustus_gff3, dict_ref_name))
final_files.append(grs.trasform_gff(genemark_gff3, dict_ref_name))
final_files.append(grs.trasform_gff(merged_prot_gff3, dict_ref_name))
else: # USING PROTEINS AND LONG READS
queue = Queue()
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###BRAKER1 (USING LONG READS) AND EXONERATE STARTED AT: \t' + now + '\t###\n'))
logistic.check_create_dir(braker_folder)
for software in range(2):
queue.put(software) # QUEUE WITH A ZERO AND A ONE
for software in range(2):
t = Thread(target=handler.braker_exonerate,
args=(queue, ref_rename, long_sorted_bam, args.species, protein_loc,
threads_use, args.fungus, wd, braker_folder, exonerate_wd, args.verbose))
t.daemon = True
t.start()
queue.join()
augustus_file, genemark_file = inputEvm.braker_folder_find(braker_folder)
augustus_gff3 = inputEvm.convert_augustus(augustus_file, wd)
genemark_gff3 = inputEvm.convert_genemark(genemark_file, wd)
merged_prot_gff3 = wd + 'exonerate/protein_evidence.gff3'
final_files.append(grs.trasform_gff(augustus_gff3, dict_ref_name))
final_files.append(grs.trasform_gff(genemark_gff3, dict_ref_name))
final_files.append(grs.trasform_gff(merged_prot_gff3, dict_ref_name))
elif args.species in augustus_species or args.species != "" or args.upgrade != "":
#if os.path.isfile(home + "/.gm_key") and args.proteins != "":
if args.proteins != "":
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###AUGUSTUS, GENEMARK-ES AND EXONERATE STARTED AT:' + now + '\t###\n'))
queue = Queue()
for software in range(3):
queue.put(software) # QUEUE WITH A ZERO AND A ONE
for software in range(3):
t = Thread(target=handler.august_gmes_exonerate, args=(queue, ref_rename, args.species, protein_loc,
threads_use, args.fungus, list_fasta_names, wd, exonerate_wd,
args.verbose))
t.daemon = True
t.start()
queue.join()
augustus_file = wd + 'augustus/augustus.gff'
augustus_gff3 = inputEvm.convert_augustus(augustus_file, wd)
genemark_file = wd + 'gmes/genemark.gtf'
genemark_gff3 = inputEvm.convert_genemark(genemark_file, wd)
merged_prot_gff3 = wd + 'exonerate/protein_evidence.gff3'
final_files.append(grs.trasform_gff(augustus_gff3, dict_ref_name))
final_files.append(grs.trasform_gff(genemark_gff3, dict_ref_name))
final_files.append(grs.trasform_gff(merged_prot_gff3, dict_ref_name))
else:
now = datetime.datetime.now().strftime(fmtdate)
sys.exit("#####UNRECOGNIZED SPECIES FOR AUGUSTUS AND NO READS\t" + now + "\t#####\n")
# Prepare EVM input files
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###EVM STARTED AT:\t' + now + '\t###\n'))
# HERE WE CONVERT FILES FOR EVM AND PLACE THEM IN INPUT FOLDER
round_n = 0
if args.upgrade == "":
if not args.short_reads and not long_reads:
if external_file:
if external_file.endswith(fasta):
external_file_gff3 = mapping.gmap('ext', ref_rename, external_file, threads_use, 'gff3_gene',
args.min_intron_length, args.max_intron_length, args.end_exon,
gmap_wd, args.verbose, Fflag=True)
external_file_changed = update.external(external_file_gff3, gmap_wd, args.verbose)
elif external_file.endswith("gff3"):
external_file_changed = update.external(external_file, gmap_wd, args.verbose)
evm_inputs = {'augustus': augustus_gff3, 'genemark': genemark_gff3, 'exonerate': merged_prot_gff3,
'external': external_file_changed}
else:
evm_inputs = {'augustus': augustus_gff3, 'genemark': genemark_gff3, 'exonerate': merged_prot_gff3}
elif args.short_reads or long_reads:
if args.external:
external_file = args.external
if external_file.endswith(fasta):
external_file_gff3 = mapping.gmap('ext', ref_rename, external_file, threads_use, 'gff3_gene',
args.min_intron_length, args.max_intron_length, args.end_exon,
gmap_wd, args.verbose, Fflag=True)
external_file_changed = update.external(external_file_gff3, gmap_wd, args.verbose)
elif external_file.endswith("gff3"):
external_file_changed = update.external(external_file, gmap_wd, args.verbose)
evm_inputs = {'pasa': pasa_gff3, 'augustus': augustus_gff3, 'genemark': genemark_gff3,
'exonerate': merged_prot_gff3, 'gmap': trinity_path,'external': external_file_changed}
else:
evm_inputs = {'pasa': pasa_gff3, 'augustus': augustus_gff3, 'genemark': genemark_gff3,
'exonerate': merged_prot_gff3, 'gmap': trinity_path}
# HERE WE RUN EVM; WE PREPARE FILES THAT ARE REQUIRED BY EVM LIKE
# WEIGTH TABLE
list_soft, pred_file, transcript_file, protein_file = inputEvm.group_EVM_inputs(evm_inputs_dir, evm_inputs)
weight_file = inputEvm.evm_weight(evm_inputs_dir, weights_dic, list_soft, pasa_name, gmap_name)
# EVM PIPELINE
if args.short_reads or long_reads: # WE HAVE SHORT READS AND PROTEINS
evm_gff3 = evmPipeline.evm_pipeline(evm_output_dir, threads_use, ref_rename, weight_file, pred_file,
transcript_file, protein_file, args.segmentSize, args.overlap_size,
args.verbose)
final_evm = grs.genename_evm(evm_gff3, args.verbose, evm_output_dir, dict_ref_name, args.upgrade)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###UPDATE WITH PASA DATABASE STARTED AT:\t ' + now + '\t###\n'))
round_n += 1
final_output = pasa.update_database(threads_use, str(round_n), pasa_dir, pasadb, ref_rename, trinity_out,
final_evm, args.verbose)
if long_reads == '':
final_update_all = grs.genename_last(final_output, args.prefix_gene, args.verbose, pasa_dir, dict_ref_name, "pasa")
final_update_stats = evmPipeline.gff3_stats(final_update_all, pasa_dir)
final_files.append(final_update_all)
final_files.append(final_update_stats)
if "command" not in (iprscan_log.decode("utf-8")) and args.interproscan:
annot, bad_models = iprscan.iprscan(masked_ref, final_update_all, interproscan_out_dir, args.threads)
final_files.append(annot)
final_files.append(bad_models)
final_output_dir = os.path.join(output_dir, args.out_dir + '_output')
logistic.check_create_dir(final_output_dir)
for filename in final_files:
if filename != '':
logistic.copy_file(filename, final_output_dir)
cmdstring = "chmod -R 775 %s" % wd
os.system(cmdstring)
now = datetime.datetime.now().strftime(fmtdate)
sys.exit("#####LOREAN FINISHED WITHOUT USING LONG READS\t" + now + "\t. GOOD BYE.#####\n")
else:
final_keep = grs.genename_last(final_output, args.prefix_gene, args.verbose, pasa_dir, dict_ref_name, "pasa")
final_keep_stats = evmPipeline.gff3_stats(final_keep, pasa_dir)
final_files.append(final_keep)
final_files.append(final_keep_stats)
elif not args.short_reads and not long_reads: # WE HAVE PROTEINS BUT NOT SHORT READS
transcript_file = ''
evm_gff3 = evmPipeline.evm_pipeline(evm_output_dir, threads_use, ref_rename, weight_file, pred_file,
transcript_file, protein_file, args.segmentSize, args.overlap_size,
args.verbose)
final_update_all = grs.genename_last(evm_gff3, args.prefix_gene, args.verbose, pasa_dir, dict_ref_name, "pasa")
final_update_stats = evmPipeline.gff3_stats(final_update_all, pasa_dir)
final_files.append(final_update_all)
final_files.append(final_update_stats)
now = datetime.datetime.now().strftime(fmtdate)
if "command" not in (iprscan_log.decode("utf-8")) and args.interproscan:
annot, bad_models = iprscan.iprscan(masked_ref, final_update_all, interproscan_out_dir, args.threads)
final_files.append(annot)
final_files.append(bad_models)
final_output_dir = os.path.join(output_dir, args.out_dir + '_output')
logistic.check_create_dir(final_output_dir)
for filename in final_files:
if filename != '':
logistic.copy_file(filename, final_output_dir)
cmdstring = "chmod -R 775 %s" % wd
os.system(cmdstring)
now = datetime.datetime.now().strftime(fmtdate)
sys.exit("##### EVM FINISHED AT:\t" + now + "\t#####\n")
else:
final_evm = grs.genename_evm(args.upgrade, args.verbose, evm_output_dir, dict_ref_name, args.upgrade)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###UPDATE WITH PASA DATABASE STARTED AT:\t ' + now + '\t###\n'))
round_n += 1
final_output = pasa.update_database(threads_use, str(round_n), pasa_dir, pasadb, ref_rename, trinity_out,
final_evm, args.verbose)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###RUNNING iASSEMBLER\t' + now + '\t###\n'))
if not long_sorted_bam:
#print("line 430")
long_fasta, stranded_value_new = mseq.filterLongReads(long_reads, args.assembly_overlap_length, args.max_long_read, gmap_wd,
adapter_value, threads_use, args.adapter_match_score, ref_rename,
args.max_intron_length, args.verbose, stranded_value)
if stranded_value != stranded_value_new:
stranded_value = stranded_value_new
if args.minimap2:
long_sam = mapping.minimap(ref_rename, long_fasta, threads_use, args.max_intron_length, gmap_wd, args.verbose)
else:
long_sam = mapping.gmap('sam', ref_rename, long_fasta, threads_use, 'samse',
args.min_intron_length, args.max_intron_length, args.end_exon, gmap_wd,
args.verbose, Fflag=False)
long_sorted_bam = mapping.sam_to_sorted_bam(long_sam, threads_use, wd, args.verbose)
sam_orig_id = mapping.change_chr(long_sorted_bam, dict_ref_name, gmap_wd, threads_use, args.verbose, "long")
final_files.append(sam_orig_id)
# HERE WE MERGE THE GMAP OUTPUT WITH THE EVM OUTPUT TO HAVE ONE # FILE
# HERE WE CHECK IF WE HAVE THE PASA UPDATED FILE OR THE EVM
# ORIGINAL FILE
mergedmap_gff3 = logistic.catTwoBeds(long_sorted_bam, final_evm, args.verbose, consensus_wd)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(("\t###GFFREAD\t" + now + "\t###\n"))
# HERE WE TRANSFORM THE COODINATES INTO SEQUENCES USING THE
# REFERENCE
gffread_fasta_file = consensus.gffread(mergedmap_gff3, ref_rename, consensus_wd, args.verbose)
# HERE WE STORE THE SEQUENCE IN A DICTIONARY
gffread_dict = consensus.fasta2Dict(gffread_fasta_file)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(("\t#CLUSTERING\t" + now + "\t###\n"))
# HERE WE CLUSTER THE SEQUENCES BASED ON THE GENOME POSITION
cluster_list = consensus.cluster_pipeline(mergedmap_gff3, stranded_value, args.verbose)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(("\t#CONSENSUS FOR EACH CLUSTER\t" + now + "\t###\n"))
# HERE WE MAKE CONSENSUS FOR EACH CLUSTER
tmp_wd = consensus_wd + 'tmp/'
logistic.check_create_dir(tmp_wd)
tmp_assembly_file = tmp_wd + 'assembly.fasta'
if os.path.isfile(tmp_assembly_file):
sys.stdout.write('No assembly')
else:
consensus.generate_fasta(cluster_list, gffread_dict, args.cluster_min_evidence,
args.cluster_max_evidence, args.assembly_overlap_length, stranded_value, tmp_wd)
consensus.assembly(args.assembly_overlap_length, args.assembly_percent_identity, threads_use, tmp_wd,
args.verbose)
utrs.lengthSupport(tmp_wd, threads_use)
# WITH THE ELSE, WE ALLOW THE USER TO DECIDE TO CHANGE THE ASSEMBLY
# PARAMETERS AND COLLECT DIFFERENT ASSEMBLED SEQUENCES WITHOT RUNNING
# THE FULL PIPELINE
# HERE WE COLLECT THE ASSEMBLED SEQUENCES. WE COLLECT ONLY SEQUENCE
# THAT PASS THE FILTER
tmp_consensus = os.path.join(consensus_wd , 'tmp/')
collect.parse_only(args.assembly_read_threshold, tmp_consensus, args.verbose)
tmp_assembly = collect.cat_assembled(tmp_consensus)
tmp_assembly_all = collect.cat_assembled_all(tmp_consensus)
# HERE WE COLLECT THE NEW ASSEMBLED SEQUENCES AND WE COLLECT THE OLD
# EVM DATA
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(("###MAPPING CONSENSUS ASSEMBLIES\t" + now + "\t###\n"))
# HERE WE MAP ALL THE FASTA FILES TO THE GENOME USING GMAP
consensus_mapped_gff3 = mapping.gmap('cons', ref_rename, tmp_assembly, threads_use, 'gff3_gene',
args.min_intron_length, args.max_intron_length, args.end_exon, gmap_wd,
args.verbose, Fflag=True)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(("###GETTING THE STRAND RIGHT\t" + now + "\t###\n"))
merged_gff3 = collect.add_EVM(final_output, gmap_wd, consensus_mapped_gff3)
#print(merged_gff3)
update2 = grs.exonerate(ref_rename, merged_gff3, threads_use, exonerate_wd, args.verbose)
print(ref_rename, update2)
update3_1 = grs.remove_redudant(ref_rename, update2)
print(update3_1)
update3 = grs.genename_lorean(update3_1, args.verbose, exonerate_wd)
print(update3)
# HERE WE COMBINE TRINITY OUTPUT AND THE ASSEMBLY OUTPUT TO RUN AGAIN
# PASA TO CORRECT SMALL ERRORS
sys.stdout.write(("###FIXING GENES NON STARTING WITH MET\t" + now + "\t###\n"))
fasta_all = logistic.cat_two_fasta(trinity_out, tmp_assembly_all, long_fasta, pasa_dir)
round_n += 1
update5 = pasa.update_database(threads_use, str(round_n), pasa_dir, pasadb, ref_rename, fasta_all,
update3, args.verbose)
if args.verbose:
sys.stdout.write(update5)
round_n += 1
update6 = pasa.update_database(threads_use, str(round_n), pasa_dir, pasadb, ref_rename, fasta_all,
update5, args.verbose)
if args.verbose:
sys.stdout.write(update6)
final_update_update = grs.genename_last(update6, args.prefix_gene, args.verbose, pasa_dir, dict_ref_name, "lorean")
final_files.append(final_update_update)
final_update_stats = evmPipeline.gff3_stats(final_update_update, pasa_dir)
final_files.append(final_update_stats)
if "command" not in (iprscan_log.decode("utf-8")) and args.interproscan:
annot, bad_models = iprscan.iprscan(masked_ref, final_update_update, interproscan_out_dir, args.threads)
final_files.append(annot)
final_files.append(bad_models)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(('###CREATING OUTPUT DIRECTORY\t' + now + '\t###\n'))
final_output_dir = os.path.join(output_dir, args.out_dir + '_output')
logistic.check_create_dir(final_output_dir)
now = datetime.datetime.now().strftime(fmtdate)
sys.stdout.write(("##PLACING OUTPUT FILES IN OUTPUT DIRECTORY\t" + now + "\t###\n"))
for filename in final_files:
if os.path.exists(filename):
logistic.copy_file(filename, final_output_dir)
cmdstring = "chmod -R 775 %s" % wd
os.system(cmdstring)
sys.exit("##### LOREAN FINISHED HERE. GOOD BYE. #####\n")
# Script entry point: record the wall-clock start time, then run the
# annotation pipeline (main() is defined earlier in this file).
if __name__ == '__main__':
    realstart = time.perf_counter()
    main()
| |
# -*- coding: utf-8 -*-
"""
Production Configurations
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis for cache
- Use sentry for error logging
"""
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .base import *  # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env('DJANGO_SECRET_KEY')
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# raven sentry client
# See https://docs.sentry.io/clients/python/integrations/django/
INSTALLED_APPS += ['raven.contrib.django.raven_compat', ]
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.io/
# Both WhiteNoise and Raven are PREPENDED so they run ahead of the
# middleware inherited from the base settings.
WHITENOISE_MIDDLEWARE = ['whitenoise.middleware.WhiteNoiseMiddleware', ]
MIDDLEWARE = WHITENOISE_MIDDLEWARE + MIDDLEWARE
RAVEN_MIDDLEWARE = ['raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware']
MIDDLEWARE = RAVEN_MIDDLEWARE + MIDDLEWARE
# SECURITY CONFIGURATION
# ------------------------------------------------------------------------------
# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security
# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    'DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS', default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    'DJANGO_SECURE_CONTENT_TYPE_NOSNIFF', default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool('DJANGO_SECURE_SSL_REDIRECT', default=True)
CSRF_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
X_FRAME_OPTIONS = 'DENY'
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['nectr.foxtrotli.co', ])
# END SITE CONFIGURATION
# INSTALLED_APPS += ['gunicorn', ]
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.io/en/latest/index.html
INSTALLED_APPS += ['storages', ]
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
# NOTE(review): django.utils.six was removed in Django 3.0 — when upgrading,
# replace six.b(...) here (see the TODO above).
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
# Static files are served by WhiteNoise (see MIDDLEWARE above), not S3.
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# COMPRESSOR
# ------------------------------------------------------------------------------
COMPRESS_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_URL = STATIC_URL
COMPRESS_ENABLED = env.bool('COMPRESS_ENABLED', default=True)
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='nectR Tutoring <noreply@nectr.foxtrotli.co>')
EMAIL_SUBJECT_PREFIX = env('DJANGO_EMAIL_SUBJECT_PREFIX', default='[nectR Tutoring]')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# Anymail with Mailgun
INSTALLED_APPS += ['anymail', ]
ANYMAIL = {
    'MAILGUN_API_KEY': env('DJANGO_MAILGUN_API_KEY'),
    'MAILGUN_SENDER_DOMAIN': env('MAILGUN_SENDER_DOMAIN')
}
EMAIL_BACKEND = 'anymail.backends.mailgun.MailgunBackend'
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
# Wrap the loaders from base.py in the cached loader for production.
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Use the Heroku-style specification
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db('DATABASE_URL')
# CACHING
# ------------------------------------------------------------------------------
REDIS_LOCATION = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0)
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': REDIS_LOCATION,
        'OPTIONS': {
            'CLIENT_CLASS': 'django_redis.client.DefaultClient',
            'IGNORE_EXCEPTIONS': True,  # mimics memcache behavior.
            # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}
# CHANNELS BACKEND
# ------------------------------------------------------------------------------
REDIS_LOCATION_CHANNELS = '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 1)
# Got to use separate redis DB number
CHANNEL_LAYERS = {
    "default": {
        "BACKEND": "asgi_redis.RedisChannelLayer",
        "CONFIG": {
            "hosts": [REDIS_LOCATION_CHANNELS],
        },
        "ROUTING": "config.routing.channel_routing",
    },
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
# Everything at WARNING and above goes to Sentry; DB/raven/sentry internals
# are routed to the console instead to avoid feedback loops.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'root': {
        'level': 'WARNING',
        'handlers': ['sentry', ],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'sentry': {
            'level': 'ERROR',
            'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        }
    },
    'loggers': {
        'django.db.backends': {
            'level': 'ERROR',
            'handlers': ['console', ],
            'propagate': False,
        },
        'raven': {
            'level': 'DEBUG',
            'handlers': ['console', ],
            'propagate': False,
        },
        'sentry.errors': {
            'level': 'DEBUG',
            'handlers': ['console', ],
            'propagate': False,
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'sentry', ],
            'propagate': False,
        },
    },
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
    'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
    'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| |
#!/usr/bin/env python
# -*- coding: Latin-1 -*-
# pycrc test application.
from optparse import OptionParser, Option, OptionValueError
from copy import copy
import os, sys
import tempfile
sys.path.append('..')
sys.path.append('.')
from pycrc.models import CrcModels
from pycrc.algorithms import Crc
class Options(object):
    """
    Parse and validate the command-line options for the pycrc test driver.

    Attributes mirror the command-line flags; parse() fills them in.
    """
    def __init__(self):
        # Accepted algorithm names, including their short aliases.
        self.AllAlgorithms = set(['bit-by-bit', 'bbb', 'bit-by-bit-fast', 'bbf', 'table-driven', 'tbl'])
        self.Compile = False
        self.RandomParameters = False
        self.CompileMixedArgs = False
        self.VariableWidth = False
        self.verbose = False
        # By default every algorithm is tested.
        self.algorithm = copy(self.AllAlgorithms)

    def parse(self, argv=None):
        """
        Parse and validate the options given in argv (sys.argv when None).

        Exits the process with status 1 on an unknown --algorithm value.
        """
        usage = """%prog [OPTIONS]"""
        algorithms = ', '.join(sorted(list(self.AllAlgorithms)) + ['all'])
        parser = OptionParser(usage=usage)
        parser.add_option('-v', '--verbose',
                          action='store_true', dest='verbose', default=self.verbose,
                          help='print information about the model')
        parser.add_option('-c', '--compile',
                          action='store_true', dest='compile', default=self.Compile,
                          help='test compiled version')
        parser.add_option('-r', '--random-parameters',
                          action='store_true', dest='random_parameters', default=self.RandomParameters,
                          help='test random parameters')
        parser.add_option('-m', '--compile-mixed-arguments',
                          action='store_true', dest='compile_mixed_args', default=self.CompileMixedArgs,
                          help='test compiled C program with some arguments given at compile time some arguments given at runtime')
        parser.add_option('-w', '--variable-width',
                          action='store_true', dest='variable_width', default=self.VariableWidth,
                          help='test variable width from 1 to 64')
        parser.add_option('-a', '--all',
                          action='store_true', dest='all', default=False,
                          help='do all tests')
        # BUG FIX: metavar='ALGO' was previously passed to str.format(),
        # where it was silently ignored; it belongs to add_option().
        parser.add_option('--algorithm',
                          action='store', type='string', dest='algorithm', default='all',
                          metavar='ALGO',
                          help='choose an algorithm from {{{0:s}}}'.format(algorithms))
        (options, args) = parser.parse_args(argv)
        self.verbose = options.verbose
        # -a implies everything; -r also implies -c because random-parameter
        # testing needs the compiled binaries.
        self.Compile = options.all or options.compile or options.random_parameters
        self.RandomParameters = options.all or options.random_parameters
        self.CompileMixedArgs = options.all or options.compile_mixed_args
        self.VariableWidth = options.all or options.variable_width
        if options.algorithm is not None:
            alg = options.algorithm.lower()
            if alg in self.AllAlgorithms:
                self.algorithm = set([alg])
            elif alg == 'all':
                self.algorithm = copy(self.AllAlgorithms)
            else:
                sys.stderr.write('unknown algorithm: {0:s}\n'.format(alg))
                sys.exit(1)
class CrcTests(object):
"""
The CRC test class.
"""
def __init__(self):
"""
The class constructor.
"""
self.pycrc_bin = '/bin/false'
self.use_algo_bit_by_bit = True
self.use_algo_bit_by_bit_fast = True
self.use_algo_table_driven = True
self.verbose = False
self.python3 = sys.version_info[0] >= 3
self.tmpdir = tempfile.mkdtemp(prefix='pycrc.')
self.check_file = None
self.crc_bin_bbb_c89 = None
self.crc_bin_bbb_c99 = None
self.crc_bin_bbf_c89 = None
self.crc_bin_bbf_c99 = None
self.crc_bin_bwe_c89 = None
self.crc_bin_bwe_c99 = None
self.crc_bin_tbl_c89 = None
self.crc_bin_tbl_c99 = None
self.crc_bin_tbl_sb4 = None
self.crc_bin_tbl_sb8 = None
self.crc_bin_tbl_sb16 = None
self.crc_bin_tbl_idx2 = None
self.crc_bin_tbl_idx4 = None
def __del__(self):
"""
The class destructor. Delete all generated files.
"""
if self.check_file is not None:
os.remove(self.check_file)
if self.crc_bin_bbb_c89 is not None:
self.__del_files([self.crc_bin_bbb_c89, self.crc_bin_bbb_c89+'.h', self.crc_bin_bbb_c89+'.c'])
if self.crc_bin_bbb_c99 is not None:
self.__del_files([self.crc_bin_bbb_c99, self.crc_bin_bbb_c99+'.h', self.crc_bin_bbb_c99+'.c'])
if self.crc_bin_bbf_c89 is not None:
self.__del_files([self.crc_bin_bbf_c89, self.crc_bin_bbf_c89+'.h', self.crc_bin_bbf_c89+'.c'])
if self.crc_bin_bbf_c99 is not None:
self.__del_files([self.crc_bin_bbf_c99, self.crc_bin_bbf_c99+'.h', self.crc_bin_bbf_c99+'.c'])
if self.crc_bin_bwe_c89 is not None:
self.__del_files([self.crc_bin_bwe_c89, self.crc_bin_bwe_c89+'.h', self.crc_bin_bwe_c89+'.c'])
if self.crc_bin_bwe_c99 is not None:
self.__del_files([self.crc_bin_bwe_c99, self.crc_bin_bwe_c99+'.h', self.crc_bin_bwe_c99+'.c'])
if self.crc_bin_tbl_c89 is not None:
self.__del_files([self.crc_bin_tbl_c89, self.crc_bin_tbl_c89+'.h', self.crc_bin_tbl_c89+'.c'])
if self.crc_bin_tbl_c99 is not None:
self.__del_files([self.crc_bin_tbl_c99, self.crc_bin_tbl_c99+'.h', self.crc_bin_tbl_c99+'.c'])
if self.crc_bin_tbl_sb4 is not None:
self.__del_files([self.crc_bin_tbl_sb4, self.crc_bin_tbl_sb4+'.h', self.crc_bin_tbl_sb4+'.c'])
if self.crc_bin_tbl_sb8 is not None:
self.__del_files([self.crc_bin_tbl_sb8, self.crc_bin_tbl_sb8+'.h', self.crc_bin_tbl_sb8+'.c'])
if self.crc_bin_tbl_sb16 is not None:
self.__del_files([self.crc_bin_tbl_sb16, self.crc_bin_tbl_sb16+'.h', self.crc_bin_tbl_sb16+'.c'])
if self.crc_bin_tbl_idx2 is not None:
self.__del_files([self.crc_bin_tbl_idx2, self.crc_bin_tbl_idx2+'.h', self.crc_bin_tbl_idx2+'.c'])
if self.crc_bin_tbl_idx4 is not None:
self.__del_files([self.crc_bin_tbl_idx4, self.crc_bin_tbl_idx4+'.h', self.crc_bin_tbl_idx4+'.c'])
os.removedirs(self.tmpdir)
def __del_files(delf, files):
"""
Helper function to delete files.
"""
for f in files:
try:
os.remove(f)
except:
print("error: can't delete {0:s}".format(f))
pass
def __get_status_output(self, cmd_str):
if self.python3:
import subprocess
return subprocess.getstatusoutput(cmd_str)
else:
import commands
return commands.getstatusoutput(cmd_str)
def __make_src(self, args, basename, cstd):
"""
Generate the *.h and *.c source files for a test.
"""
gen_src = '{0:s}/{1:s}'.format(self.tmpdir, basename)
cmd_str = self.pycrc_bin + ' {0:s} --std {1:s} --generate h -o {2:s}.h'.format(args, cstd, gen_src)
if self.verbose:
print(cmd_str)
ret = self.__get_status_output(cmd_str)
if ret[0] != 0:
print('error: the following command returned error: {0:s}'.format(cmd_str))
print(ret[1])
print(ret[2])
return None
cmd_str = self.pycrc_bin + ' {0:s} --std {1:s} --generate c-main -o {2:s}.c'.format(args, cstd, gen_src)
if self.verbose:
print(cmd_str)
ret = self.__get_status_output(cmd_str)
if ret[0] != 0:
print('error: the following command returned error: {0:s}'.format(cmd_str))
print(ret[1])
print(ret[2])
return None
return gen_src
def __compile(self, args, binfile, cstd):
"""
Compile a generated source file.
"""
cmd_str = 'gcc -W -Wall -pedantic -Werror -std={0:s} -o {1:s} {2:s}.c'.format(cstd, binfile, binfile)
if self.verbose:
print(cmd_str)
ret = self.__get_status_output(cmd_str)
if len(ret) > 1 and len(ret[1]) > 0:
print(ret[1])
if ret[0] != 0:
print('error: {0:d} with command error: {1:s}'.format(ret[0], cmd_str))
return None
return binfile
def __make_bin(self, args, basename, cstd='c99'):
"""
Generate the source and compile to a binary.
"""
filename = self.__make_src(args, basename, cstd)
if filename is None:
return None
if not self.__compile(args, filename, cstd):
self.__del_files([filename, filename+'.h', filename+'.c'])
return None
return filename
    def __setup_files(self, opt):
        """
        Set up files needed during the test.

        Writes the standard CRC check string '123456789' to a file in the
        scratch directory, and — when opt.Compile is set — generates and
        compiles one test binary per enabled algorithm and C standard,
        recording each path in the matching crc_bin_* attribute.

        Returns True on success, False as soon as any build fails (earlier
        binaries built in the same call are kept and cleaned up by __del__).
        """
        if self.verbose:
            print('Setting up files...')
        # The check file holds the canonical '123456789' CRC test input.
        self.check_file = '{0:s}/check.txt'.format(self.tmpdir)
        f = open(self.check_file, 'wb')
        if self.python3:
            # Binary mode on Python 3 requires bytes, not str.
            f.write(bytes('123456789', 'utf-8'))
        else:
            f.write('123456789')
        f.close()
        if opt.Compile:
            if self.use_algo_bit_by_bit:
                filename = self.__make_bin('--algorithm bit-by-bit', 'crc_bbb_c89', 'c89')
                if filename is None:
                    return False
                self.crc_bin_bbb_c89 = filename
                filename = self.__make_bin('--algorithm bit-by-bit', 'crc_bbb_c99', 'c99')
                if filename is None:
                    return False
                self.crc_bin_bbb_c99 = filename
            if self.use_algo_bit_by_bit_fast:
                filename = self.__make_bin('--algorithm bit-by-bit-fast', 'crc_bbf_c89', 'c89')
                if filename is None:
                    return False
                self.crc_bin_bbf_c89 = filename
                filename = self.__make_bin('--algorithm bit-by-bit-fast', 'crc_bbf_c99', 'c99')
                if filename is None:
                    return False
                self.crc_bin_bbf_c99 = filename
            if self.use_algo_table_driven:
                filename = self.__make_bin('--algorithm table-driven', 'crc_tbl_c89', 'c89')
                if filename is None:
                    return False
                self.crc_bin_tbl_c89 = filename
                filename = self.__make_bin('--algorithm table-driven', 'crc_tbl_c99', 'c99')
                if filename is None:
                    return False
                self.crc_bin_tbl_c99 = filename
                # FIXME don't test undefined params
                # filename = self.__make_bin('--algorithm table-driven --slice-by 4', 'crc_tbl_sb4')
                # if filename is None:
                #     return False
                # self.crc_bin_tbl_sb4 = filename
                #
                # filename = self.__make_bin('--algorithm table-driven --slice-by 8', 'crc_tbl_sb8')
                # if filename is None:
                #     return False
                # self.crc_bin_tbl_sb8 = filename
                #
                # filename = self.__make_bin('--algorithm table-driven --slice-by 16', 'crc_tbl_sb16')
                # if filename is None:
                #     return False
                # self.crc_bin_tbl_sb16 = filename
                filename = self.__make_bin('--algorithm table-driven --table-idx-width 2', 'crc_tbl_idx2')
                if filename is None:
                    return False
                self.crc_bin_tbl_idx2 = filename
                filename = self.__make_bin('--algorithm table-driven --table-idx-width 4', 'crc_tbl_idx4')
                if filename is None:
                    return False
                self.crc_bin_tbl_idx4 = filename
        return True
def __run_command(self, cmd_str):
    """
    Execute a shell command; return its stdout, or None if it failed.
    """
    if self.verbose:
        print(cmd_str)
    status, output = self.__get_status_output(cmd_str)
    if status != 0:
        print('error: the following command returned error: {0:s}'.format(cmd_str))
        print(output)
        return None
    return output
def __check_command(self, cmd_str, expected_result):
    """
    Run a command and check that its stdout, parsed as hex, equals
    expected_result.  Returns True on match, False on failure or mismatch.
    """
    ret = self.__run_command(cmd_str)
    if ret is None:
        # __run_command already printed the error; without this guard the
        # original code crashed with TypeError on int(None, 16).
        return False
    if int(ret, 16) != expected_result:
        print('error: different checksums!')
        print('{0:s}: expected {1:#x}, got {2:s}'.format(cmd_str, expected_result, ret))
        return False
    return True
def __check_bin(self, args, expected_result, long_data_type = True):
    """
    Run every available precompiled binary with *args* and compare the
    output against expected_result.
    """
    binaries = (
        self.crc_bin_bbb_c89, self.crc_bin_bbb_c99,
        self.crc_bin_bbf_c89, self.crc_bin_bbf_c99,
        self.crc_bin_tbl_c89, self.crc_bin_tbl_c99,
        self.crc_bin_tbl_sb4, self.crc_bin_tbl_sb8, self.crc_bin_tbl_sb16,
        self.crc_bin_tbl_idx2, self.crc_bin_tbl_idx4,
    )
    for binary in binaries:
        if binary is None:
            continue
        # Don't test width > 32 for C89, as I don't know how to ask for an data type > 32 bits.
        if long_data_type and binary[-3:] == 'c89':
            continue
        if not self.__check_command(binary + ' ' + args, expected_result):
            return False
    return True
def __get_crc(self, model, check_str = '123456789', expected_crc = None):
    """
    Get the CRC for a set of parameters from the Python reference implementation.

    Runs every enabled reference algorithm on check_str, cross-checking that
    they all agree with each other (and with expected_crc when given).
    Returns the agreed CRC, or None if any algorithm disagreed.
    """
    if self.verbose:
        out_str = 'Crc(width = {width:d}, poly = {poly:#x}, reflect_in = {reflect_in}, xor_in = {xor_in:#x}, reflect_out = {reflect_out}, xor_out = {xor_out:#x})'.format(**model)
        if expected_crc is not None:
            out_str += ' [check = {0:#x}]'.format(expected_crc)
        print(out_str)
    alg = Crc(width = model['width'], poly = model['poly'],
              reflect_in = model['reflect_in'], xor_in = model['xor_in'],
              reflect_out = model['reflect_out'], xor_out = model['xor_out'])
    error = False
    # 'crc' is the reference value: the caller-supplied check value, or else
    # the result of the first algorithm that runs.  Later algorithms are
    # compared against it, so statement order matters here.
    crc = expected_crc
    if self.use_algo_bit_by_bit:
        bbb_crc = alg.bit_by_bit(check_str)
        if crc is None:
            crc = bbb_crc
        error = error or bbb_crc != crc
    if self.use_algo_bit_by_bit_fast:
        bbf_crc = alg.bit_by_bit_fast(check_str)
        if crc is None:
            crc = bbf_crc
        error = error or bbf_crc != crc
    if self.use_algo_table_driven:
        tbl_crc = alg.table_driven(check_str)
        if crc is None:
            crc = tbl_crc
        error = error or tbl_crc != crc
    if error:
        # Report every value we computed so the mismatch can be localised.
        print('error: different checksums!')
        if expected_crc is not None:
            print('      check: {0:#x}'.format(expected_crc))
        if self.use_algo_bit_by_bit:
            print('      bit-by-bit: {0:#x}'.format(bbb_crc))
        if self.use_algo_bit_by_bit_fast:
            print('      bit-by-bit-fast: {0:#x}'.format(bbf_crc))
        if self.use_algo_table_driven:
            print('      table_driven: {0:#x}'.format(tbl_crc))
        return None
    return crc
def __compile_and_check_res(self, cmp_opt, run_opt, name, expected_crc):
    """
    Compile a model into a binary, run it once, then delete the artifacts.
    """
    filename = self.__make_bin(cmp_opt, name)
    if filename is None:
        return False
    cmd = filename if run_opt is None else filename + ' ' + run_opt
    ok = self.__check_command(cmd, expected_crc)
    self.__del_files([filename, filename + '.h', filename + '.c'])
    return ok
def __test_models(self):
    """
    Standard Tests.
    Check the Python reference implementation and the pycrc command line
    against every known model.
    """
    if self.verbose:
        print('Running __test_models()...')
    check_str = '123456789'
    check_hex = ''.join(['{0:02x}'.format(c) for c in bytearray(check_str, 'utf-8')])
    for m in CrcModels().models:
        expected_crc = m['check']
        if self.__get_crc(m, check_str, expected_crc) != expected_crc:
            return False
        ext_args = '--width {width:d} --poly {poly:#x} --xor-in {xor_in:#x} --reflect-in {reflect_in} --xor-out {xor_out:#x} --reflect-out {reflect_out}'.format(**m)
        commands = [
            '{0:s} --model {1:s}'.format(self.pycrc_bin, m['name']),
            '{0:s} {1:s}'.format(self.pycrc_bin, ext_args),
            '{0:s} {1:s} --check-hexstring {2:s}'.format(self.pycrc_bin, ext_args, check_hex),
            '{0:s} --model {1:s} --check-file {2:s}'.format(self.pycrc_bin, m['name'], self.check_file),
        ]
        for cmd_str in commands:
            if not self.__check_command(cmd_str, expected_crc):
                return False
        if not self.__check_bin(ext_args, expected_crc, m['width'] > 32):
            return False
    if self.verbose:
        print("")
    return True
def __test_compiled_models(self):
    """
    Standard Tests.
    Compile and run the generated code for all known models.
    """
    if self.verbose:
        print('Running __test_compiled_models()...')
    for m in CrcModels().models:
        expected_crc = m['check']
        cmp_opt = '--model {name}'.format(**m)
        # (algorithm options, binary name) pairs to build and execute.
        jobs = []
        if self.use_algo_bit_by_bit:
            jobs.append(('--algorithm bit-by-bit', 'crc_bbb_mod'))
        if self.use_algo_bit_by_bit_fast:
            jobs.append(('--algorithm bit-by-bit-fast', 'crc_bbf_mod'))
        if self.use_algo_table_driven:
            jobs.append(('--algorithm table-driven', 'crc_tbl_mod'))
            jobs.append(('--algorithm table-driven --slice-by=4', 'crc_tsb4_mod'))
            jobs.append(('--algorithm table-driven --slice-by=8', 'crc_tsb8_mod'))
            jobs.append(('--algorithm table-driven --slice-by=16', 'crc_tsb16_mod'))
            jobs.append(('--algorithm table-driven --table-idx-width=2', 'crc_tix2_mod'))
            jobs.append(('--algorithm table-driven --table-idx-width=4', 'crc_tix4_mod'))
        for algo_opt, name in jobs:
            if not self.__compile_and_check_res(algo_opt + ' ' + cmp_opt, None, name, expected_crc):
                return False
    return True
def __test_compiled_special_cases(self):
    """
    Standard Tests.
    Exercise table-driven corner cases: crc-5 with unreflected input and
    shrinking table index widths.
    """
    if self.verbose:
        print('Running __test_compiled_special_cases()...')
    if self.use_algo_table_driven:
        for idx_width in (8, 4, 2):
            cmp_opt = '--model=crc-5 --reflect-in=0 --algorithm table-driven --table-idx-width={0:d}'.format(idx_width)
            if not self.__compile_and_check_res(cmp_opt, None, 'crc_tbl_special', 0x01):
                return False
    return True
def __test_variable_width(self):
    """
    Test CRC widths from 1 to 64 bits, including widths that are not a
    multiple of eight, derived from the crc-64-jones parameters.
    """
    if self.verbose:
        print('Running __test_variable_width()...')
    base = CrcModels().get_params('crc-64-jones')
    widths = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64]
    for width in widths:
        mask = (1 << width) - 1
        mw = {
            'width': width,
            'poly': base['poly'] & mask,
            'reflect_in': base['reflect_in'],
            'xor_in': base['xor_in'] & mask,
            'reflect_out': base['reflect_out'],
            'xor_out': base['xor_out'] & mask,
        }
        args = '--width {width:d} --poly {poly:#x} --xor-in {xor_in:#x} --reflect-in {reflect_in} --xor-out {xor_out:#x} --reflect-out {reflect_out}'.format(**mw)
        check = self.__get_crc(mw)
        if check is None:
            return False
        # (enabled?, precompiled c99 binary, compile option, binary name)
        variants = (
            (self.use_algo_bit_by_bit, self.crc_bin_bbb_c99, '--algorithm bit-by-bit', 'crc_bbb_arg'),
            (self.use_algo_bit_by_bit_fast, self.crc_bin_bbf_c99, '--algorithm bit-by-bit-fast', 'crc_bbf_arg'),
            (self.use_algo_table_driven, self.crc_bin_tbl_c99, '--algorithm table-driven', 'crc_tbl_arg'),
        )
        for enabled, binary, algo_opt, name in variants:
            if not enabled:
                continue
            if binary is not None:
                if not self.__check_command(binary + ' ' + args, check):
                    return False
            if not self.__compile_and_check_res(algo_opt + ' ' + args, None, name, check):
                return False
    return True
def __test_compiled_mixed_args(self):
    """
    Test compiled arguments.

    For every one of the 2**6 ways to split the six zmodem parameters
    between compile time and run time, compile with one subset baked in,
    pass the complement on the command line, and require the zmodem check
    value.  Replaces six hand-written nested loops with itertools.product
    (same iteration order: first parameter varies slowest).
    """
    from itertools import product  # stdlib; local import keeps module header untouched
    if self.verbose:
        print('Running __test_compiled_mixed_args()...')
    m = {
        'name': 'zmodem',
        'width': ['', '--width 16'],
        'poly': ['', '--poly 0x1021'],
        'reflect_in': ['', '--reflect-in False'],
        'xor_in': ['', '--xor-in 0x0'],
        'reflect_out': ['', '--reflect-out False'],
        'xor_out': ['', '--xor-out 0x0'],
        'check': 0x31c3,
    }
    params = ('width', 'poly', 'reflect_in', 'xor_in', 'reflect_out', 'xor_out')
    fmt = '{width:s} {poly:s} {reflect_in} {xor_in:s} {reflect_out} {xor_out:s}'
    for bits in product(range(2), repeat=len(params)):
        # bit 0 -> empty string on the compile line, option on the run line.
        cmp_args = dict((p, m[p][b]) for p, b in zip(params, bits))
        run_args = dict((p, m[p][1 - b]) for p, b in zip(params, bits))
        cmp_opt = fmt.format(**cmp_args)
        run_opt = fmt.format(**run_args)
        if self.use_algo_bit_by_bit:
            if not self.__compile_and_check_res('--algorithm bit-by-bit' + ' ' + cmp_opt, run_opt, 'crc_bbb_arg', m['check']):
                return False
        if self.use_algo_bit_by_bit_fast:
            if not self.__compile_and_check_res('--algorithm bit-by-bit-fast' + ' ' + cmp_opt, run_opt, 'crc_bbf_arg', m['check']):
                return False
        if self.use_algo_table_driven:
            if not self.__compile_and_check_res('--algorithm table-driven' + ' ' + cmp_opt, run_opt, 'crc_tbl_arg', m['check']):
                return False
    return True
def __test_random_params(self):
    """
    Cross-check pycrc's command-line output against every precompiled
    binary over a grid of parameter combinations.
    """
    if self.verbose:
        print('Running __test_random_params()...')
    for width in [8, 16, 32]:
        mask = (1 << width) - 1
        for poly in [0x8005, 0x4c11db7, 0xa5a5a5a5]:
            for refin in [0, 1]:
                for refout in [0, 1]:
                    for init in [0x0, 0x1, 0x5a5a5a5a]:
                        args = '--width {0:d} --poly {1:#x} --reflect-in {2} --reflect-out {3} --xor-in {4:#x} --xor-out 0x0'.format(width, poly & mask, refin, refout, init)
                        ret = self.__run_command(self.pycrc_bin + ' ' + args)
                        if ret is None:
                            return False
                        if not self.__check_bin(args, int(ret, 16), width > 32):
                            return False
    return True
def run(self, opt):
    """
    Run all requested test suites; return True when everything passed.
    """
    algos = opt.algorithm
    self.use_algo_bit_by_bit = 'bit-by-bit' in algos or 'bbb' in algos
    self.use_algo_bit_by_bit_fast = 'bit-by-bit-fast' in algos or 'bbf' in algos
    self.use_algo_table_driven = 'table-driven' in algos or 'tbl' in algos
    self.verbose = opt.verbose
    interpreter = 'python3' if self.python3 else 'python'
    self.pycrc_bin = interpreter + ' pycrc.py'

    if not self.__setup_files(opt):
        return False
    if not self.__test_models():
        return False
    if opt.Compile and not self.__test_compiled_models():
        return False
    if opt.Compile and not self.__test_compiled_special_cases():
        return False
    if opt.VariableWidth and not self.__test_variable_width():
        return False
    if opt.CompileMixedArgs and not self.__test_compiled_mixed_args():
        return False
    if opt.RandomParameters and not self.__test_random_params():
        return False
    return True
def main():
    """
    Parse the command line, run the test suite, return the exit status.
    """
    opt = Options()
    opt.parse(sys.argv[1:])
    if not CrcTests().run(opt):
        return 1
    print('Test OK')
    return 0
# Program entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| |
# Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE.txt
""" """
from rowgenerators import Source
from rowgenerators.source import Source
from rowgenerators import SourceError
class YamlMetatabSource(Source):
    """Turn a metatab-formatted YAML file into Metatab rows."""

    def __init__(self, ref, table=None, cache=None, working_dir=None, env=None, **kwargs):
        super().__init__(ref, cache, working_dir, **kwargs)

        self.url = ref
        # Lowercased term name -> section name, built from the declaration doc.
        self.section_map = {}
        # Section name -> Section object.
        self.sections = {}

    def yield_dict(self, doc, d, parent=None):
        """Walk a nested dict/list structure, yielding (term_name, value, parent).

        NOTE(review): list entries matching a declared term-value name are
        removed in place (``del e[vtn]``), so the input structure is mutated.
        """
        for k, v in d.items():
            tn = "{}.{}".format((parent or 'Root').split('.')[-1], k).lower()
            t = doc.decl_terms.get(tn, {})
            vtn = t.get('termvaluename', '').lower()
            if isinstance(v, list):
                for e in v:
                    try:
                        value = e[vtn]
                        del e[vtn]
                        yield (tn, value, parent)
                    except KeyError:
                        pass
                    yield from self.yield_dict(doc, e, tn)
            elif isinstance(v, dict):
                yield from self.yield_dict(doc, v, tn)
            else:
                yield (tn, v, parent)

    def __iter__(self):
        """Iterate over all of the lines in the file"""
        import yaml
        from metatab import MetatabDoc

        with open(self.url.fspath) as f:
            # safe_load: metatab YAML is plain data; yaml.load without an
            # explicit Loader can construct arbitrary Python objects from
            # untrusted input and is deprecated.
            d = yaml.safe_load(f)

        decl = d.get('declare', 'metatab-latest')
        doc = MetatabDoc(decl=decl)

        section_names = ['root', 'contacts', 'documentation', 'resources', 'references', 'schema']
        for section_name in section_names:
            section = doc.decl_sections[section_name]
            for tn in section.get('terms', []):
                self.section_map[tn.lower()] = section_name
            self.sections[section_name] = doc.get_or_new_section(section_name, section['args'])

        # Track the last term seen for each name so children can attach
        # to their parent term.
        last_term = {}
        for term_name, value, parent in self.yield_dict(doc, d):
            # (removed leftover debug print of every term)
            section = self.sections.get(self.section_map.get(term_name) or 'root')
            if parent is None:
                term = section.new_term(term_name, value)
            else:
                term = last_term[parent].new_child(term_name, value)
            last_term[term_name] = term

        yield from doc.rows
class MetatabRowGenerator(Source):
    """A row generator that replays a pre-built sequence of rows.

    Mostly a thin wrapper around the given iterable, with a ``path``
    property so term interpreters know where the terms came from.
    """

    def __init__(self, ref, cache=None, working_dir=None, path = None, **kwargs):
        super().__init__(ref, cache, working_dir, **kwargs)
        self._rows = ref
        self._path = path or '<none>'

    @property
    def path(self):
        """Origin of the rows, for diagnostics."""
        return self._path

    def open(self):
        pass

    def close(self):
        pass

    def __iter__(self):
        yield from self._rows
class TextRowGenerator(MetatabRowGenerator):
    """Return lines of text of a line-oriented metatab file, breaking them to be used as Metatab rows.
    This is the core of the Lines format implementation"""

    def __init__(self, ref, cache=None, working_dir=None, path = None, **kwargs):
        super().__init__(ref, cache, working_dir, path, **kwargs)

        self._text = self._resolve_text(ref)
        self._text_lines = self._text.splitlines()
        self._path = path or '<none>'

    @staticmethod
    def _resolve_text(ref):
        """Extract text from *ref*: a pathlib.Path, an open file handle, a
        Url-like object, a file name, or a plain string, tried in that order.

        Raises SourceError when no interpretation fits.  (Replaces a
        while-True/break construct with four bare ``except:`` clauses that
        also swallowed KeyboardInterrupt/SystemExit.)
        """
        # Pathlib Path
        try:
            with ref.open() as r:
                return r.read()
        except Exception:
            pass
        # Filehandle
        try:
            return ref.read()
        except Exception:
            pass
        # Url
        try:
            with ref.inner.fspath.open() as f:
                return f.read()
        except Exception:
            pass
        # File name
        try:
            with open(ref) as r:
                return r.read()
        except Exception:
            pass
        # Plain text: probe for str-like behavior.
        try:
            ref.splitlines()
            return ref
        except AttributeError:
            pass
        raise SourceError("Can't handle ref of type {}".format(type(ref)))

    @property
    def path(self):
        """Origin of the text, for diagnostics."""
        return self._path

    def open(self):
        pass

    def close(self):
        pass

    def __iter__(self):
        import re

        for row in self._text_lines:
            if re.match(r'^\s*#', row):  # Skip comments
                continue

            # Special handling for ====, which implies a section:
            #     ==== Schema
            # is equivalent to
            #     Section: Schema
            if row.startswith('===='):
                row = re.sub(r'^=*', 'Section:', row)

            row = [e.strip() for e in row.split(':', 1)]

            # Pipe characters separate columns; '\|' escapes a literal pipe.
            if len(row) > 1:
                row = [row[0]] + [e.replace('\|', '|') for e in re.split(r'(?<!\\)\|', row[1])]

            yield row
| |
"""Treadmill master low level API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import kazoo
import six
from treadmill import appevents
from treadmill import zknamespace as z
from treadmill import zkutils
from treadmill.apptrace import events as traceevents
_LOGGER = logging.getLogger(__name__)
def _app_node(app_id, existing=True):
    """Returns node path given app id (with a '#' sequence suffix for new nodes)."""
    suffix = '' if existing else '#'
    return os.path.join(z.SCHEDULED, app_id) + suffix
# ACL granting all servers in the cell full control (read/write/create/
# delete/admin) over a node.
#
# Set in /finished, /servers
_SERVERS_ACL = zkutils.make_role_acl('servers', 'rwcda')
# ACL granting servers delete permission only.
_SERVERS_ACL_DEL = zkutils.make_role_acl('servers', 'd')
# Timer interval to reevaluate time events (seconds).
# TIMER_INTERVAL = 60
def create_event(zkclient, priority, event, payload):
    """Places event on the event queue; returns the created node name."""
    assert 0 <= priority <= 100
    prefix = '%(priority)03d-%(event)s-' % {'priority': priority, 'event': event}
    node_path = z.path.event(prefix)
    created = zkutils.put(zkclient, node_path, payload,
                          acl=[_SERVERS_ACL], sequence=True)
    return os.path.basename(created)
def create_apps(zkclient, app_id, app, count, created_by=None):
    """Schedules new app instances.

    Creates *count* sequence nodes under /scheduled and posts a pending
    trace event for each; returns the list of new instance ids.
    """
    instance_ids = []
    for _idx in range(0, count):
        node_path = zkutils.put(zkclient,
                                _app_node(app_id, existing=False),
                                app,
                                sequence=True,
                                # Consistency: this is the same role/perms as
                                # the module-level ACL; reuse it rather than
                                # rebuilding an identical one per call.
                                acl=[_SERVERS_ACL])
        instance_id = os.path.basename(node_path)
        instance_ids.append(instance_id)

        appevents.post_zk(
            zkclient,
            traceevents.PendingTraceEvent(
                instanceid=instance_id,
                why='%s:created' % created_by if created_by else 'created',
                payload=''
            )
        )

    return instance_ids
def delete_apps(zkclient, app_ids, deleted_by=None):
    """Unschedules apps and posts a pending-delete trace event for each."""
    why = '%s:deleted' % deleted_by if deleted_by else 'deleted'
    for app_id in app_ids:
        zkutils.ensure_deleted(zkclient, _app_node(app_id))
        appevents.post_zk(
            zkclient,
            traceevents.PendingDeleteTraceEvent(instanceid=app_id, why=why)
        )
def get_app(zkclient, app_id):
    """Return scheduled app details by app_id (default when not scheduled)."""
    app_node = _app_node(app_id)
    return zkutils.get_default(zkclient, app_node)
def list_scheduled_apps(zkclient):
    """List ids of all scheduled apps."""
    return zkclient.get_children(z.SCHEDULED)
def list_running_apps(zkclient):
    """List all running apps."""
    running = zkclient.get_children(z.RUNNING)
    return running
def update_app_priorities(zkclient, updates):
    """Set new priorities from an {app_id: priority} mapping.

    Fires a single 'apps' event listing every app actually modified.
    """
    modified = []
    for app_id, priority in six.iteritems(updates):
        assert 0 <= priority <= 100

        app = get_app(zkclient, app_id)
        if app is None:
            # App does not exist; nothing to update.
            continue

        app['priority'] = priority
        changed = zkutils.update(zkclient, _app_node(app_id), app,
                                 check_content=True)
        if changed:
            modified.append(app_id)

    if modified:
        create_event(zkclient, 1, 'apps', modified)
def create_bucket(zkclient, bucket_id, parent_id, traits=0):
    """Creates bucket definition in Zookeeper and notifies the scheduler."""
    payload = {
        'traits': traits,
        'parent': parent_id
    }
    zkutils.put(zkclient, z.path.bucket(bucket_id), payload, check_content=True)
    create_event(zkclient, 0, 'buckets', None)
def update_bucket_traits(zkclient, bucket_id, traits):
    """Overwrite the traits of an existing bucket."""
    bucket = get_bucket(zkclient, bucket_id)
    bucket['traits'] = traits
    zkutils.put(zkclient, z.path.bucket(bucket_id), bucket, check_content=True)
def get_bucket(zkclient, bucket_id):
    """Return bucket definition stored in Zookeeper."""
    bucket_node = z.path.bucket(bucket_id)
    return zkutils.get(zkclient, bucket_node)
def delete_bucket(zkclient, bucket_id):
    """Deletes bucket definition from Zookeeper."""
    zkutils.ensure_deleted(zkclient, z.path.bucket(bucket_id))
    # NOTE: we never remove buckets, no need for event.
def list_buckets(zkclient):
    """Return sorted list of all bucket ids."""
    bucket_ids = zkclient.get_children(z.BUCKETS)
    return sorted(bucket_ids)
def create_server(zkclient, server_id, parent_id):
    """Creates server definition in Zookeeper; fires event on content change."""
    server_node = z.path.server(server_id)
    server_acl = zkutils.make_host_acl(server_id, 'rwcd')

    zkutils.ensure_exists(zkclient, server_node, acl=[server_acl])

    data = zkutils.get(zkclient, server_node)
    if parent_id:
        if data:
            data['parent'] = parent_id
        else:
            data = {'parent': parent_id}

    _LOGGER.info('Creating server node %s with data %r and ACL %r',
                 server_node, data, server_acl)
    if zkutils.put(zkclient, server_node, data,
                   acl=[server_acl], check_content=True):
        create_event(zkclient, 0, 'servers', [server_id])
def list_servers(zkclient):
    """Return sorted list of all server ids."""
    server_ids = zkclient.get_children(z.SERVERS)
    return sorted(server_ids)
def update_server_attrs(zkclient, server_id, traits, partition):
    """Update server traits and partition; notify scheduler on change."""
    server_node = z.path.server(server_id)
    server_data = zkutils.get(zkclient, server_node)
    server_data['traits'] = traits
    server_data['partition'] = partition

    if zkutils.update(zkclient, server_node, server_data, check_content=True):
        create_event(zkclient, 0, 'servers', [server_id])
def update_server_capacity(zkclient, server_id,
                           memory=None, cpu=None, disk=None):
    """Update server capacity; only truthy dimensions are touched."""
    server_node = z.path.server(server_id)
    server_data = zkutils.get(zkclient, server_node)
    for key, value in (('memory', memory), ('cpu', cpu), ('disk', disk)):
        if value:
            server_data[key] = value
    if zkutils.update(zkclient, server_node, server_data, check_content=True):
        create_event(zkclient, 0, 'servers', [server_id])
def update_server_features(zkclient, server_id, features):
    """Replace the server's feature list; notify scheduler on change."""
    server_node = z.path.server(server_id)
    server_data = zkutils.get(zkclient, server_node)
    server_data['features'] = features
    if zkutils.update(zkclient, server_node, server_data, check_content=True):
        create_event(zkclient, 0, 'servers', [server_id])
def update_server_parent(zkclient, server_id, parent_id):
    """Reparent the server; notify scheduler on change."""
    server_node = z.path.server(server_id)
    server_data = zkutils.get(zkclient, server_node)
    server_data['parent'] = parent_id
    if zkutils.update(zkclient, server_node, server_data, check_content=True):
        create_event(zkclient, 0, 'servers', [server_id])
def delete_server(zkclient, server_id):
    """Delete the server and its placement; always notify the scheduler."""
    for node in (z.path.server(server_id), z.path.placement(server_id)):
        zkutils.ensure_deleted(zkclient, node)
    create_event(zkclient, 0, 'servers', [server_id])
def update_server_state(zkclient, server_id, state, apps=None):
    """Request a server state change (e.g. freeze) via a scheduler event."""
    create_event(zkclient, 0, 'server_state', [server_id, state, apps])
def get_server(zkclient, server_id, placement=False):
    """Return server object, optionally merged with its placement data."""
    data = zkutils.get(zkclient, z.path.server(server_id))
    if not placement:
        return data
    placement_data = zkutils.get_default(zkclient,
                                         z.path.placement(server_id),
                                         {})
    data.update(placement_data)
    return data
def reboot_server(zkclient, server_id):
    """Create server reboot event node (servers hold delete-only ACL on it)."""
    reboot_node = z.path.reboot(server_id)
    zkutils.ensure_exists(zkclient, reboot_node, acl=[_SERVERS_ACL_DEL])
def cell_insert_bucket(zkclient, bucket_id):
    """Add bucket to the cell; fires a 'cell' event only on actual change."""
    cell_node = z.path.cell(bucket_id)
    if zkclient.exists(cell_node):
        return
    zkutils.ensure_exists(zkclient, cell_node)
    create_event(zkclient, 0, 'cell', None)
def cell_remove_bucket(zkclient, bucket_id):
    """Remove bucket from the cell; fires a 'cell' event only on actual change."""
    cell_node = z.path.cell(bucket_id)
    if not zkclient.exists(cell_node):
        return
    zkutils.ensure_deleted(zkclient, cell_node)
    create_event(zkclient, 0, 'cell', None)
def cell_buckets(zkclient):
    """Return sorted list of top level cell buckets."""
    bucket_ids = zkclient.get_children(z.CELL)
    return sorted(bucket_ids)
def appmonitors(zkclient):
    """Return sorted list of app monitor ids."""
    monitor_ids = zkclient.get_children(z.path.appmonitor())
    return sorted(monitor_ids)
def get_appmonitor(zkclient, monitor_id, raise_notfound=False):
    """Return app monitor given id.

    Returns None when absent, unless raise_notfound re-raises NoNodeError.
    """
    try:
        data = zkutils.get(zkclient, z.path.appmonitor(monitor_id))
    except kazoo.client.NoNodeError:
        _LOGGER.info('App monitor does not exist: %s', monitor_id)
        if raise_notfound:
            raise
        return None
    data['_id'] = monitor_id
    return data
def update_appmonitor(zkclient, monitor_id, count):
    """Configures app monitor with the desired instance count."""
    monitor_node = z.path.appmonitor(monitor_id)
    zkutils.put(zkclient, monitor_node, {'count': count}, check_content=True)
def delete_appmonitor(zkclient, monitor_id):
    """Deletes the app monitor node."""
    monitor_node = z.path.appmonitor(monitor_id)
    zkutils.ensure_deleted(zkclient, monitor_node)
def identity_groups(zkclient):
    """Return sorted list of all identity group ids."""
    group_ids = zkclient.get_children(z.IDENTITY_GROUPS)
    return sorted(group_ids)
def get_identity_group(zkclient, ident_group_id):
    """Return identity group given id."""
    data = zkutils.get(zkclient, z.path.identity_group(ident_group_id))
    data['_id'] = ident_group_id
    return data
def update_identity_group(zkclient, ident_group_id, count):
    """Updates identity group count; fires event when content changed."""
    group_node = z.path.identity_group(ident_group_id)
    changed = zkutils.put(zkclient,
                          group_node,
                          {'count': count},
                          check_content=True,
                          acl=[_SERVERS_ACL])
    if changed:
        create_event(zkclient, 0, 'identity_groups', [ident_group_id])
def delete_identity_group(zkclient, ident_group_id):
    """Delete identity group and notify the scheduler."""
    group_node = z.path.identity_group(ident_group_id)
    zkutils.ensure_deleted(zkclient, group_node)
    create_event(zkclient, 0, 'identity_groups', [ident_group_id])
def update_allocations(zkclient, allocations):
    """Updates cell allocations; fires 'allocations' event on change."""
    if zkutils.put(zkclient,
                   z.path.allocation(),
                   allocations,
                   check_content=True):
        create_event(zkclient, 0, 'allocations', None)
def get_scheduled_stats(zkclient):
    """Return count of scheduled apps by proid (empty dict when node absent)."""
    return zkutils.get_default(zkclient, z.SCHEDULED_STATS, {})
| |
"""Tests for AVM Fritz!Box config flow."""
from unittest import mock
from pyfritzhome import LoginError
import pytest
from requests.exceptions import HTTPError
from homeassistant.components.fritzbox.const import DOMAIN
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.const import CONF_DEVICES, CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from . import MOCK_CONFIG
from tests.async_mock import Mock, patch
# First mocked device config from the shared fritzbox test fixtures.
MOCK_USER_DATA = MOCK_CONFIG[DOMAIN][CONF_DEVICES][0]
# Minimal SSDP discovery payload describing a mocked Fritz!Box.
MOCK_SSDP_DATA = {
    ATTR_SSDP_LOCATION: "https://fake_host:12345/test",
    ATTR_UPNP_FRIENDLY_NAME: "fake_name",
    ATTR_UPNP_UDN: "uuid:only-a-test",
}
@pytest.fixture(name="fritz")
def fritz_fixture() -> Mock:
    """Patch the Fritzhome class used by the config flow and yield the mock."""
    with patch("homeassistant.components.fritzbox.config_flow.Fritzhome") as fritz:
        yield fritz
async def test_user(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow by user."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "user"

    entry = await hass.config_entries.flow.async_configure(
        flow["flow_id"], user_input=MOCK_USER_DATA
    )
    assert entry["type"] == "create_entry"
    assert entry["title"] == "fake_host"
    entry_data = entry["data"]
    assert entry_data[CONF_HOST] == "fake_host"
    assert entry_data[CONF_PASSWORD] == "fake_pass"
    assert entry_data[CONF_USERNAME] == "fake_user"
    assert not entry["result"].unique_id
async def test_user_auth_failed(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow by user with authentication failure."""
    # First login attempt fails, any retry would succeed.
    fritz().login.side_effect = [LoginError("Boom"), mock.DEFAULT]

    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "user"
    assert flow["errors"]["base"] == "invalid_auth"
async def test_user_not_successful(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow by user but no connection found."""
    fritz().login.side_effect = OSError("Boom")

    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert flow["type"] == "abort"
    assert flow["reason"] == "no_devices_found"
async def test_user_already_configured(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow by user when already configured."""
    first = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert first["type"] == "create_entry"
    assert not first["result"].unique_id

    second = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert second["type"] == "abort"
    assert second["reason"] == "already_configured"
async def test_import(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow by import."""
    entry = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "import"}, data=MOCK_USER_DATA
    )
    assert entry["type"] == "create_entry"
    assert entry["title"] == "fake_host"
    entry_data = entry["data"]
    assert entry_data[CONF_HOST] == "fake_host"
    assert entry_data[CONF_PASSWORD] == "fake_pass"
    assert entry_data[CONF_USERNAME] == "fake_user"
    assert not entry["result"].unique_id
async def test_ssdp(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow from discovery."""
    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "confirm"

    entry = await hass.config_entries.flow.async_configure(
        flow["flow_id"],
        user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
    )
    assert entry["type"] == "create_entry"
    assert entry["title"] == "fake_name"
    entry_data = entry["data"]
    assert entry_data[CONF_HOST] == "fake_host"
    assert entry_data[CONF_PASSWORD] == "fake_pass"
    assert entry_data[CONF_USERNAME] == "fake_user"
    assert entry["result"].unique_id == "only-a-test"
async def test_ssdp_no_friendly_name(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow from discovery without friendly name."""
    ssdp_data = MOCK_SSDP_DATA.copy()
    del ssdp_data[ATTR_UPNP_FRIENDLY_NAME]

    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=ssdp_data
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "confirm"

    entry = await hass.config_entries.flow.async_configure(
        flow["flow_id"],
        user_input={CONF_PASSWORD: "fake_pass", CONF_USERNAME: "fake_user"},
    )
    assert entry["type"] == "create_entry"
    # Without a friendly name, the host is used as the title.
    assert entry["title"] == "fake_host"
    entry_data = entry["data"]
    assert entry_data[CONF_HOST] == "fake_host"
    assert entry_data[CONF_PASSWORD] == "fake_pass"
    assert entry_data[CONF_USERNAME] == "fake_user"
    assert entry["result"].unique_id == "only-a-test"
async def test_ssdp_auth_failed(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow from discovery with authentication failure."""
    fritz().login.side_effect = LoginError("Boom")

    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "confirm"
    assert flow["errors"] == {}

    retry = await hass.config_entries.flow.async_configure(
        flow["flow_id"],
        user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
    )
    assert retry["type"] == "form"
    assert retry["step_id"] == "confirm"
    assert retry["errors"]["base"] == "invalid_auth"
async def test_ssdp_not_successful(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow from discovery but no device found."""
    fritz().login.side_effect = OSError("Boom")

    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "confirm"

    outcome = await hass.config_entries.flow.async_configure(
        flow["flow_id"],
        user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
    )
    assert outcome["type"] == "abort"
    assert outcome["reason"] == "no_devices_found"
async def test_ssdp_not_supported(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow from discovery with unsupported device."""
    fritz().get_device_elements.side_effect = HTTPError("Boom")

    flow = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert flow["type"] == "form"
    assert flow["step_id"] == "confirm"

    outcome = await hass.config_entries.flow.async_configure(
        flow["flow_id"],
        user_input={CONF_PASSWORD: "whatever", CONF_USERNAME: "whatever"},
    )
    assert outcome["type"] == "abort"
    assert outcome["reason"] == "not_supported"
async def test_ssdp_already_in_progress_unique_id(hass: HomeAssistantType, fritz: Mock):
    """Test starting a flow from discovery twice (matched by unique id)."""
    first = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert first["type"] == "form"
    assert first["step_id"] == "confirm"

    second = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert second["type"] == "abort"
    assert second["reason"] == "already_in_progress"
async def test_ssdp_already_in_progress_host(hass: HomeAssistantType, fritz: Mock):
    """A second discovery without a UDN still aborts (matched by host)."""
    first = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert first["type"] == "form"
    assert first["step_id"] == "confirm"

    # Announce the same device without a UDN so matching must fall back
    # to the host instead of the unique ID.
    ssdp_data_without_udn = MOCK_SSDP_DATA.copy()
    del ssdp_data_without_udn[ATTR_UPNP_UDN]
    second = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=ssdp_data_without_udn
    )
    assert second["type"] == "abort"
    assert second["reason"] == "already_in_progress"
async def test_ssdp_already_configured(hass: HomeAssistantType, fritz: Mock):
    """Discovery of an already configured device aborts and backfills the ID."""
    user_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}, data=MOCK_USER_DATA
    )
    assert user_result["type"] == "create_entry"
    assert not user_result["result"].unique_id

    ssdp_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "ssdp"}, data=MOCK_SSDP_DATA
    )
    assert ssdp_result["type"] == "abort"
    assert ssdp_result["reason"] == "already_configured"
    # The abort path also sets the unique ID on the pre-existing entry.
    assert user_result["result"].unique_id == "only-a-test"
| |
"""Support for DoorBird devices."""
from http import HTTPStatus
import logging
from aiohttp import web
from doorbirdpy import DoorBird
import requests
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TOKEN,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util, slugify
from .const import (
CONF_EVENTS,
DOMAIN,
DOOR_STATION,
DOOR_STATION_EVENT_ENTITY_IDS,
DOOR_STATION_INFO,
PLATFORMS,
UNDO_UPDATE_LISTENER,
)
from .util import get_doorstation_by_token
_LOGGER = logging.getLogger(__name__)
API_URL = f"/api/{DOMAIN}"
CONF_CUSTOM_URL = "hass_url_override"
RESET_DEVICE_FAVORITES = "doorbird_reset_favorites"
# Schema of one legacy YAML-configured DoorBird device.
# NOTE(review): DEVICE_SCHEMA appears unused in this file now that YAML
# configuration is removed — confirm before deleting.
DEVICE_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_TOKEN): cv.string,
        vol.Optional(CONF_EVENTS, default=[]): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_CUSTOM_URL): cv.string,
        vol.Optional(CONF_NAME): cv.string,
    }
)

# YAML configuration has been removed; warn (do not fail) if still present.
CONFIG_SCHEMA = cv.removed(DOMAIN, raise_if_present=False)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the DoorBird component."""
    hass.data.setdefault(DOMAIN, {})

    # Endpoint the doorstations call back into to trigger events.
    hass.http.register_view(DoorBirdRequestView)

    def _handle_reset_favorites(event):
        """Delete every webhook favorite on the device matching the token."""
        token = event.data.get("token")
        if token is None:
            return
        doorstation = get_doorstation_by_token(hass, token)
        if doorstation is None:
            _LOGGER.error("Device not found for provided token")
            return

        # Clear webhooks
        favorites = doorstation.device.favorites()
        for favorite_type, favorite_ids in favorites.items():
            for favorite_id in favorite_ids:
                doorstation.device.delete_favorite(favorite_type, favorite_id)

    hass.bus.async_listen(RESET_DEVICE_FAVORITES, _handle_reset_favorites)
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up DoorBird from a config entry."""
    _async_import_options_from_data_if_missing(hass, entry)

    doorstation_config = entry.data
    doorstation_options = entry.options
    config_entry_id = entry.entry_id

    device_ip = doorstation_config[CONF_HOST]
    username = doorstation_config[CONF_USERNAME]
    password = doorstation_config[CONF_PASSWORD]

    device = DoorBird(device_ip, username, password)
    try:
        # doorbirdpy is synchronous; probe readiness + info in an executor.
        status, info = await hass.async_add_executor_job(_init_doorbird_device, device)
    except requests.exceptions.HTTPError as err:
        if err.response.status_code == HTTPStatus.UNAUTHORIZED:
            # Bad credentials won't fix themselves on retry — fail permanently.
            _LOGGER.error(
                "Authorization rejected by DoorBird for %s@%s", username, device_ip
            )
            return False
        # Any other HTTP failure: let Home Assistant retry the setup later.
        raise ConfigEntryNotReady from err
    except OSError as oserr:
        # Network-level failure (device offline) — retry later.
        _LOGGER.error("Failed to setup doorbird at %s: %s", device_ip, oserr)
        raise ConfigEntryNotReady from oserr

    # status is (ok: bool, code) as returned by DoorBird.ready().
    if not status[0]:
        _LOGGER.error(
            "Could not connect to DoorBird as %s@%s: Error %s",
            username,
            device_ip,
            str(status[1]),
        )
        raise ConfigEntryNotReady

    # Older entries may not carry a token; fall back to the entry id.
    token = doorstation_config.get(CONF_TOKEN, config_entry_id)
    custom_url = doorstation_config.get(CONF_CUSTOM_URL)
    name = doorstation_config.get(CONF_NAME)
    events = doorstation_options.get(CONF_EVENTS, [])
    doorstation = ConfiguredDoorBird(device, name, custom_url, token)
    doorstation.update_events(events)
    # Subscribe to doorbell or motion events
    if not await _async_register_events(hass, doorstation):
        raise ConfigEntryNotReady

    undo_listener = entry.add_update_listener(_update_listener)

    hass.data[DOMAIN][config_entry_id] = {
        DOOR_STATION: doorstation,
        DOOR_STATION_INFO: info,
        UNDO_UPDATE_LISTENER: undo_listener,
    }

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)

    return True
def _init_doorbird_device(device):
return device.ready(), device.info()
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    domain_data = hass.data[DOMAIN]
    # Detach the options-update listener before tearing down the platforms.
    domain_data[entry.entry_id][UNDO_UPDATE_LISTENER]()

    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        domain_data.pop(entry.entry_id)
    return unload_ok
async def _async_register_events(hass, doorstation):
    """Register HA webhook URLs on the device; return False on HTTP failure."""
    try:
        await hass.async_add_executor_job(doorstation.register_events, hass)
    except requests.exceptions.HTTPError:
        # Most likely cause: the DoorBird user lacks the "API Operator"
        # permission. Surface a persistent notification instead of raising.
        hass.components.persistent_notification.async_create(
            "Doorbird configuration failed. Please verify that API "
            "Operator permission is enabled for the Doorbird user. "
            "A restart will be required once permissions have been "
            "verified.",
            title="Doorbird Configuration Failure",
            notification_id="doorbird_schedule_error",
        )
        return False

    return True
async def _update_listener(hass: HomeAssistant, entry: ConfigEntry):
    """Handle options update.

    Pushes the new event list to the device by re-registering webhooks.
    """
    config_entry_id = entry.entry_id
    doorstation = hass.data[DOMAIN][config_entry_id][DOOR_STATION]
    doorstation.update_events(entry.options[CONF_EVENTS])
    # Subscribe to doorbell or motion events
    await _async_register_events(hass, doorstation)
@callback
def _async_import_options_from_data_if_missing(hass: HomeAssistant, entry: ConfigEntry):
    """Copy legacy data keys into entry options when not already set."""
    options = dict(entry.options)
    imported = {
        option: entry.data[option]
        for option in (CONF_EVENTS,)
        if option not in entry.options and option in entry.data
    }
    if imported:
        options.update(imported)
        hass.config_entries.async_update_entry(entry, options=options)
class ConfiguredDoorBird:
    """Attach additional information to pass along with configured device."""

    def __init__(self, device, name, custom_url, token):
        """Initialize configured device.

        Args:
            device: Underlying DoorBird API client.
            name: User-visible name of this station.
            custom_url: Optional Home Assistant URL override used when
                registering webhooks on the device.
            token: Token the device must present when calling back.
        """
        self._name = name
        self._device = device
        self._custom_url = custom_url
        self.events = None
        self.doorstation_events = None
        self._token = token

    def update_events(self, events):
        """Update the doorbird events."""
        self.events = events
        self.doorstation_events = [self._get_event_name(event) for event in self.events]

    @property
    def name(self):
        """Get custom device name."""
        return self._name

    @property
    def device(self):
        """Get the configured device."""
        return self._device

    @property
    def custom_url(self):
        """Get custom url for device."""
        return self._custom_url

    @property
    def token(self):
        """Get token for device."""
        return self._token

    def register_events(self, hass):
        """Register events on device."""
        # Get the URL of this server
        hass_url = get_url(hass)

        # Override url if another is specified in the configuration
        if self.custom_url is not None:
            hass_url = self.custom_url

        for event in self.doorstation_events:
            # Only log success when the favorite was actually created;
            # previously this logged success unconditionally.
            if self._register_event(hass_url, event):
                _LOGGER.info(
                    "Successfully registered URL for %s on %s", event, self.name
                )

    @property
    def slug(self):
        """Get device slug."""
        return slugify(self._name)

    def _get_event_name(self, event):
        return f"{self.slug}_{event}"

    def _register_event(self, hass_url, event) -> bool:
        """Add a schedule entry in the device for a sensor.

        Returns True when the webhook favorite exists after registration.
        """
        url = f"{hass_url}{API_URL}/{event}?token={self._token}"

        # Register HA URL as webhook if not already, then get the ID
        if not self.webhook_is_registered(url):
            self.device.change_favorite("http", f"Home Assistant ({event})", url)

        if not self.get_webhook_id(url):
            _LOGGER.warning(
                'Could not find favorite for URL "%s". ' 'Skipping sensor "%s"',
                url,
                event,
            )
            return False
        return True

    def webhook_is_registered(self, url, favs=None) -> bool:
        """Return whether the given URL is registered as a device favorite."""
        return self.get_webhook_id(url, favs) is not None

    # Quoted union annotation: the original `-> str or None` evaluated to
    # plain `str`, which was misleading; the string form stays accurate
    # without requiring Python 3.10 union syntax at runtime.
    def get_webhook_id(self, url, favs=None) -> "str | None":
        """
        Return the device favorite ID for the given URL.

        The favorite must exist or there will be problems.
        """
        # Fetch from the device only when no favorites were passed in.
        favs = favs if favs else self.device.favorites()

        if "http" not in favs:
            return None

        for fav_id in favs["http"]:
            if favs["http"][fav_id]["value"] == url:
                return fav_id

        return None

    def get_event_data(self):
        """Get data to pass along with HA event."""
        return {
            "timestamp": dt_util.utcnow().isoformat(),
            "live_video_url": self._device.live_video_url,
            "live_image_url": self._device.live_image_url,
            "rtsp_live_video_url": self._device.rtsp_live_video_url,
            "html5_viewer_url": self._device.html5_viewer_url,
        }
class DoorBirdRequestView(HomeAssistantView):
    """Provide a page for the device to call."""

    requires_auth = False
    url = API_URL
    name = API_URL[1:].replace("/", ":")
    extra_urls = [API_URL + "/{event}"]

    async def get(self, request, event):
        """Respond to requests from the device."""
        # pylint: disable=no-self-use
        hass = request.app["hass"]

        token = request.query.get("token")

        device = get_doorstation_by_token(hass, token)

        if device is None:
            return web.Response(
                status=HTTPStatus.UNAUTHORIZED, text="Invalid token provided."
            )

        # `device` is guaranteed non-None past the early return above, so the
        # previous `if device: ... else: event_data = {}` fallback was dead code.
        event_data = device.get_event_data()

        if event == "clear":
            hass.bus.async_fire(RESET_DEVICE_FAVORITES, {"token": token})

            message = f"HTTP Favorites cleared for {device.slug}"
            return web.Response(text=message)

        event_data[ATTR_ENTITY_ID] = hass.data[DOMAIN][
            DOOR_STATION_EVENT_ENTITY_IDS
        ].get(event)

        hass.bus.async_fire(f"{DOMAIN}_{event}", event_data)

        return web.Response(text="OK")
| |
"""Pre-trained ImageNet networks."""
from typing import Callable, NamedTuple, Tuple, Optional
import sys
from typeguard import check_argument_types
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as tf_slim
# pylint: disable=unused-import
# Workaround of missing slim's import
# see https://github.com/tensorflow/tensorflow/issues/6064
import tensorflow.contrib.slim.nets
# pylint: enable=unused-import
from neuralmonkey.dataset import Dataset
from neuralmonkey.decorators import tensor
from neuralmonkey.model.model_part import ModelPart, FeedDict, InitializerSpecs
from neuralmonkey.model.stateful import SpatialStatefulWithOutput
# Specification of one TF-Slim ImageNet network:
#   scope: zero-argument callable returning the slim arg scope for the net
#   image_size: (height, width) the network expects on input
#   apply_net: callable mapping an image tensor to (output, end_points)
ImageNetSpec = NamedTuple(
    "ImageNetSpec",
    [("scope", Callable),
     ("image_size", Tuple[int, int]),
     ("apply_net", Callable)])
# pylint: disable=import-error
def get_alexnet() -> ImageNetSpec:
    """Build the spec for TF-Slim's AlexNet v2.

    Fix: the slim module is ``nets/alexnet.py`` (there is no
    ``nets.alexnet_v2`` module in tensorflow/models research/slim), so the
    original ``import nets.alexnet_v2`` raised ImportError even though the
    body referenced ``nets.alexnet`` attributes.
    """
    import nets.alexnet
    return ImageNetSpec(
        scope=nets.alexnet.alexnet_v2_arg_scope,
        image_size=(224, 224),
        apply_net=lambda image: nets.alexnet.alexnet_v2(
            image, is_training=False))
def get_vgg_by_type(vgg_type: str) -> Callable[[], ImageNetSpec]:
    """Return a factory that builds the spec for the given VGG variant."""
    def get_vgg() -> ImageNetSpec:
        import nets.vgg

        variants = {"vgg16": nets.vgg.vgg_16,
                    "vgg19": nets.vgg.vgg_19}
        if vgg_type not in variants:
            raise ValueError(
                "Unknown type of VGG net: {}".format(vgg_type))
        net_fn = variants[vgg_type]

        return ImageNetSpec(
            scope=nets.vgg.vgg_arg_scope,
            image_size=(224, 224),
            apply_net=lambda image: net_fn(
                image, is_training=False, dropout_keep_prob=1.0))
    return get_vgg
def get_resnet_by_type(resnet_type: str) -> Callable[[], ImageNetSpec]:
    """Return a factory that builds the spec for the given ResNet v2 variant."""
    def get_resnet() -> ImageNetSpec:
        import nets.resnet_v2

        variants = {"resnet_50": nets.resnet_v2.resnet_v2_50,
                    "resnet_101": nets.resnet_v2.resnet_v2_101,
                    "resnet_152": nets.resnet_v2.resnet_v2_152}
        if resnet_type not in variants:
            raise ValueError(
                "Unknown type of ResNet: {}".format(resnet_type))
        net_fn = variants[resnet_type]

        # NOTE(review): 229x229 is unusual (224 or 299 are the common sizes);
        # kept as-is to preserve behavior — confirm upstream intent.
        return ImageNetSpec(
            scope=nets.resnet_v2.resnet_arg_scope,
            image_size=(229, 229),
            apply_net=lambda image: net_fn(
                image, is_training=False, global_pool=False))
    return get_resnet
# pylint: enable=import-error
# Maps a TF-Slim network identifier to a zero-argument factory that lazily
# imports the corresponding net module and builds its ImageNetSpec.
SUPPORTED_NETWORKS = {
    "alexnet_v2": get_alexnet,
    "vgg_16": get_vgg_by_type("vgg16"),
    "vgg_19": get_vgg_by_type("vgg19"),
    "resnet_v2_50": get_resnet_by_type("resnet_50"),
    "resnet_v2_101": get_resnet_by_type("resnet_101"),
    "resnet_v2_152": get_resnet_by_type("resnet_152")
}
class ImageNet(ModelPart, SpatialStatefulWithOutput):
    """Pre-trained ImageNet network.

    We use the ImageNet networks as they are in the tensorflow/models
    repository (https://github.com/tensorflow/models). In order to use them,
    you need to clone the repository and configure the ImageNet object such
    that it has a full path to "research/slim" in the repository. Visit
    https://github.com/tensorflow/models/tree/master/research/slim for
    information about checkpoints of the pre-trained models.
    """

    # pylint: disable=too-many-arguments
    def __init__(self,
                 name: str,
                 data_id: str,
                 network_type: str,
                 slim_models_path: str,
                 load_checkpoint: str = None,
                 spatial_layer: str = None,
                 encoded_layer: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Initialize pre-trained ImageNet network.

        Args:
            name: Name of the model part (the ImageNet network, will be in
                its scope, independently on `name`).
            data_id: Id of series with images (list of 3D numpy arrays)
            network_type: Identifier of ImageNet network from TFSlim.
            slim_models_path: Path to Slim models in tensorflow/models
                repository ("research/slim").
            load_checkpoint: Checkpoint file from which the pre-trained
                network is loaded.
            spatial_layer: String identifier of the convolutional map
                (model's endpoint). Check TFSlim documentation for end point
                specifications.
            encoded_layer: String id of the network layer that will be used
                as input of a decoder. `None` means averaging the
                convolutional maps.
        """
        check_argument_types()
        ModelPart.__init__(self, name, load_checkpoint=load_checkpoint,
                           initializers=initializers, save_checkpoint=None)

        # Make the cloned "research/slim" directory importable so the lazy
        # `import nets.*` statements in the net factories resolve.
        sys.path.insert(0, slim_models_path)

        self.data_id = data_id
        self.network_type = network_type
        self.spatial_layer = spatial_layer
        self.encoded_layer = encoded_layer

        if self.network_type not in SUPPORTED_NETWORKS:
            raise ValueError(
                "Network '{}' is not among the supported ones ({})".format(
                    self.network_type, ", ".join(SUPPORTED_NETWORKS.keys())))

        net_specification = SUPPORTED_NETWORKS[self.network_type]()
        self.height, self.width = net_specification.image_size

        # Build the TF graph; `end_points` maps endpoint names to the
        # network's intermediate tensors.
        with tf_slim.arg_scope(net_specification.scope()):
            _, self.end_points = net_specification.apply_net(self.input_image)

        if (self.spatial_layer is not None
                and self.spatial_layer not in self.end_points):
            raise ValueError(
                "Network '{}' does not contain endpoint '{}'".format(
                    self.network_type, self.spatial_layer))

        # The spatial endpoint must be a 4D (batch, height, width, channels)
        # convolutional map.
        if spatial_layer is not None:
            net_output = self.end_points[self.spatial_layer]
            if len(net_output.get_shape()) != 4:
                raise ValueError(
                    ("Endpoint '{}' for network '{}' cannot be "
                     "a convolutional map, its dimensionality is: {}."
                     ).format(self.spatial_layer, self.network_type,
                              ", ".join([str(d.value) for d in
                                         net_output.get_shape()])))

        if (self.encoded_layer is not None
                and self.encoded_layer not in self.end_points):
            raise ValueError(
                "Network '{}' does not contain endpoint '{}'.".format(
                    self.network_type, self.encoded_layer))

    @tensor
    def input_image(self) -> tf.Tensor:
        # Batch of float images at the network's expected spatial size.
        return tf.placeholder(
            tf.float32, [None, self.height, self.width, 3])

    @tensor
    def spatial_states(self) -> Optional[tf.Tensor]:
        if self.spatial_layer is None:
            return None
        net_output = self.end_points[self.spatial_layer]
        # Freeze the pre-trained features: no gradients flow into the net.
        net_output = tf.stop_gradient(net_output)
        return net_output

    @tensor
    def spatial_mask(self) -> tf.Tensor:
        # NOTE(review): returns None when no spatial layer is configured,
        # despite the tf.Tensor annotation — confirm callers handle this.
        if self.spatial_layer is None:
            return None
        # Every position of the convolutional map is valid input.
        mask = tf.ones(tf.shape(self.spatial_states)[:3])
        # pylint: disable=no-member
        mask.set_shape(self.spatial_states.get_shape()[:3])
        # pylint: enable=no-member
        return mask

    @tensor
    def output(self) -> tf.Tensor:
        if self.encoded_layer is None:
            # No encoded endpoint configured: mean-pool the spatial map
            # over its height and width dimensions.
            return tf.reduce_mean(self.spatial_states, [1, 2])
        encoded = tf.squeeze(self.end_points[self.encoded_layer], [1, 2])
        encoded = tf.stop_gradient(encoded)
        return encoded

    def _init_saver(self) -> None:
        # The saver must cover both this model part's variables and the
        # slim network's variables, which live under the network-type scope.
        if not self._saver:
            with tf.variable_scope(self.name, reuse=True):
                local_variables = tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
                slim_variables = tf.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES, scope=self.network_type)
                self._saver = tf.train.Saver(
                    var_list=local_variables + slim_variables)

    def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
        """Return the feed dict mapping dataset images to the placeholder.

        The `train` flag is accepted for interface compatibility but unused.
        """
        images = np.array(dataset.get_series(self.data_id))
        assert images.shape[1:] == (self.height, self.width, 3)

        return {self.input_image: images}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.